Merge branch 'drm-fixes-4.2' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
More radeon and amdgpu fixes for 4.2. Mostly amdgpu bug fixes.

* 'drm-fixes-4.2' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu/dce8: Re-set VBLANK interrupt state when enabling a CRTC
  drm/radeon/ci: silence a harmless PCC warning
  drm/amdgpu/cz: silence some dpm debug output
  drm/amdgpu/cz: store the forced dpm level
  drm/amdgpu/cz: unforce dpm levels before forcing to low/high
  drm/amdgpu: remove bogus check in gfx8 rb setup
  drm/amdgpu: set proper index/data pair for smc regs on CZ (v2)
  drm/amdgpu: disable the IP module if early_init returns -ENOENT (v2)
  drm/amdgpu: stop context leak in the error path
  drm/amdgpu: validate the context id in the dependencies
  drm/radeon: fix user ptr race condition
  drm/radeon: Don't flush the GART TLB if rdev->gart.ptr == NULL
  drm/radeon: add a dpm quirk for Sapphire Radeon R9 270X 2GB GDDR5
commit 90438ac813
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                   struct amdgpu_cs_parser *p)
 {
+    struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
     struct amdgpu_ib *ib;
     int i, j, r;

@@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
         for (j = 0; j < num_deps; ++j) {
             struct amdgpu_fence *fence;
             struct amdgpu_ring *ring;
+            struct amdgpu_ctx *ctx;

             r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
                                    deps[j].ip_instance,
@@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
             if (r)
                 return r;

+            ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
+            if (ctx == NULL)
+                return -EINVAL;
+
             r = amdgpu_fence_recreate(ring, p->filp,
                                       deps[j].handle,
                                       &fence);
-            if (r)
+            if (r) {
+                amdgpu_ctx_put(ctx);
                 return r;
+            }

             amdgpu_sync_fence(&ib->sync, fence);
             amdgpu_fence_unref(&fence);
+            amdgpu_ctx_put(ctx);
         }
     }

@@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,

     r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
                            wait->in.ring, &ring);
-    if (r)
+    if (r) {
+        amdgpu_ctx_put(ctx);
         return r;
+    }

     r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-    if (r)
+    if (r) {
+        amdgpu_ctx_put(ctx);
         return r;
+    }

     r = fence_wait_timeout(&fence->base, true, timeout);
     amdgpu_fence_unref(&fence);
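
Both amdgpu_cs.c hunks enforce the same refcounting rule: once amdgpu_ctx_get() hands out a context reference, every exit path, including the error returns, has to balance it with amdgpu_ctx_put(). A minimal standalone sketch of the pattern, with hypothetical names rather than the amdgpu API:

#include <stdio.h>

/* Hypothetical refcounted context, standing in for struct amdgpu_ctx. */
struct ctx {
    int refcount;
};

static struct ctx *ctx_get(struct ctx *c) { c->refcount++; return c; }
static void ctx_put(struct ctx *c)        { c->refcount--; }

/* Every return after ctx_get() must be paired with a ctx_put(). */
static int add_dependency(struct ctx *c, int handle)
{
    struct ctx *ref = ctx_get(c);
    int err = (handle < 0) ? -1 : 0;   /* stand-in for the fence lookup */

    if (err) {
        ctx_put(ref);                  /* error path: drop the reference */
        return err;
    }

    /* ... use the fence ... */
    ctx_put(ref);                      /* normal path: drop it as well */
    return 0;
}

int main(void)
{
    struct ctx c = { .refcount = 1 };

    add_dependency(&c, -1);            /* fails, must not leak */
    add_dependency(&c, 3);             /* succeeds */
    printf("refcount back to %d\n", c.refcount);   /* prints 1 */
    return 0;
}
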
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -1207,10 +1207,15 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
         } else {
             if (adev->ip_blocks[i].funcs->early_init) {
                 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
-                if (r)
+                if (r == -ENOENT)
+                    adev->ip_block_enabled[i] = false;
+                else if (r)
                     return r;
+                else
+                    adev->ip_block_enabled[i] = true;
+            } else {
+                adev->ip_block_enabled[i] = true;
             }
-            adev->ip_block_enabled[i] = true;
         }
     }

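
The reworked branch treats -ENOENT from an IP block's early_init callback as "this block is not present": the block is disabled and initialization continues, while any other error still aborts, and blocks without a callback stay enabled. A small sketch of that decision logic under hypothetical names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical IP block descriptor, standing in for amdgpu's ip_blocks[]. */
struct ip_block {
    const char *name;
    int (*early_init)(void);   /* returns 0, -ENOENT, or another -errno */
    bool enabled;
};

static int init_blocks(struct ip_block *blocks, int count)
{
    for (int i = 0; i < count; i++) {
        int r = blocks[i].early_init ? blocks[i].early_init() : 0;

        if (r == -ENOENT)
            blocks[i].enabled = false;   /* block absent: disable, keep going */
        else if (r)
            return r;                    /* real failure: abort init */
        else
            blocks[i].enabled = true;
    }
    return 0;
}

static int present(void) { return 0; }
static int absent(void)  { return -ENOENT; }

int main(void)
{
    struct ip_block blocks[] = {
        { "gfx", present, false },
        { "vce", absent,  false },
    };

    init_blocks(blocks, 2);
    for (int i = 0; i < 2; i++)
        printf("%s: %s\n", blocks[i].name,
               blocks[i].enabled ? "enabled" : "disabled");
    return 0;
}
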
drivers/gpu/drm/amd/amdgpu/cz_dpm.c

@@ -1679,25 +1679,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
     if (ret)
         return ret;

-    DRM_INFO("DPM unforce state min=%d, max=%d.\n",
-             pi->sclk_dpm.soft_min_clk,
-             pi->sclk_dpm.soft_max_clk);
+    DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
+              pi->sclk_dpm.soft_min_clk,
+              pi->sclk_dpm.soft_max_clk);

     return 0;
 }

 static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
-                              enum amdgpu_dpm_forced_level level)
+                                  enum amdgpu_dpm_forced_level level)
 {
     int ret = 0;

     switch (level) {
     case AMDGPU_DPM_FORCED_LEVEL_HIGH:
+        ret = cz_dpm_unforce_dpm_levels(adev);
+        if (ret)
+            return ret;
         ret = cz_dpm_force_highest(adev);
         if (ret)
             return ret;
         break;
     case AMDGPU_DPM_FORCED_LEVEL_LOW:
+        ret = cz_dpm_unforce_dpm_levels(adev);
+        if (ret)
+            return ret;
         ret = cz_dpm_force_lowest(adev);
         if (ret)
             return ret;
@@ -1711,6 +1717,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
         break;
     }

+    adev->pm.dpm.forced_level = level;
+
     return ret;
 }

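
Two behaviours change here: any previously forced limit is released (cz_dpm_unforce_dpm_levels) before clamping to low or high, and the requested level is recorded in adev->pm.dpm.forced_level so later queries report it. A compact sketch of that ordering, with stand-in functions instead of the SMU calls:

#include <stdio.h>

enum forced_level { LEVEL_AUTO, LEVEL_LOW, LEVEL_HIGH };

/* Stand-ins for the SMU messages that clamp the sclk DPM range. */
static int unforce_levels(void) { puts("restore full sclk range"); return 0; }
static int force_lowest(void)   { puts("clamp to lowest sclk");    return 0; }
static int force_highest(void)  { puts("clamp to highest sclk");   return 0; }

static enum forced_level current_level = LEVEL_AUTO;

static int force_dpm_level(enum forced_level level)
{
    int ret = 0;

    switch (level) {
    case LEVEL_HIGH:
        ret = unforce_levels();        /* drop any earlier clamp first */
        if (ret)
            return ret;
        ret = force_highest();
        break;
    case LEVEL_LOW:
        ret = unforce_levels();
        if (ret)
            return ret;
        ret = force_lowest();
        break;
    case LEVEL_AUTO:
        ret = unforce_levels();
        break;
    }
    if (ret)
        return ret;

    current_level = level;             /* remember what was forced */
    return 0;
}

int main(void)
{
    force_dpm_level(LEVEL_HIGH);
    force_dpm_level(LEVEL_LOW);
    printf("forced level now %d\n", current_level);
    return 0;
}
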
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
     struct drm_device *dev = crtc->dev;
     struct amdgpu_device *adev = dev->dev_private;
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+    unsigned type;

     switch (mode) {
     case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
         dce_v8_0_vga_enable(crtc, true);
         amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
         dce_v8_0_vga_enable(crtc, false);
+        /* Make sure VBLANK interrupt is still enabled */
+        type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+        amdgpu_irq_update(adev, &adev->crtc_irq, type);
         drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
         dce_v8_0_crtc_load_lut(crtc);
         break;
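
The DPMS-on path now looks up the CRTC's interrupt source and calls amdgpu_irq_update(), so a VBLANK interrupt that users still hold enabled gets reprogrammed after the CRTC comes back up. The underlying idea, re-syncing the hardware enable bit from the software refcount after a reset, in a small sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-source IRQ bookkeeping, standing in for the driver's
 * interrupt source tracking. */
struct irq_source {
    int enable_refcount;   /* how many users want this interrupt */
    bool hw_enabled;       /* what is currently programmed in hardware */
};

/* Reprogram hardware from the software state; safe to call at any time. */
static void irq_update(struct irq_source *src)
{
    bool want = src->enable_refcount > 0;

    if (want != src->hw_enabled) {
        src->hw_enabled = want;
        printf("vblank irq %s\n", want ? "enabled" : "disabled");
    }
}

static void crtc_reset(struct irq_source *src)
{
    src->hw_enabled = false;   /* re-enabling the CRTC loses the enable bit */
}

int main(void)
{
    struct irq_source vblank = { .enable_refcount = 1, .hw_enabled = true };

    crtc_reset(&vblank);   /* CRTC comes back up with the interrupt off */
    irq_update(&vblank);   /* ...so re-sync it, as the DPMS-on path now does */
    return 0;
}
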
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -1813,10 +1813,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
     u32 data, mask;

     data = RREG32(mmCC_RB_BACKEND_DISABLE);
-    if (data & 1)
-        data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-    else
-        data = 0;
+    data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;

     data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);

drivers/gpu/drm/amd/amdgpu/vi.c

@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
     spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 }

+/* smu_8_0_d.h */
+#define mmMP0PUB_IND_INDEX 0x180
+#define mmMP0PUB_IND_DATA 0x181
+
+static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+    unsigned long flags;
+    u32 r;
+
+    spin_lock_irqsave(&adev->smc_idx_lock, flags);
+    WREG32(mmMP0PUB_IND_INDEX, (reg));
+    r = RREG32(mmMP0PUB_IND_DATA);
+    spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+    return r;
+}
+
+static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&adev->smc_idx_lock, flags);
+    WREG32(mmMP0PUB_IND_INDEX, (reg));
+    WREG32(mmMP0PUB_IND_DATA, (v));
+    spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
 {
     unsigned long flags;
@@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle)
     bool smc_enabled = false;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-    adev->smc_rreg = &vi_smc_rreg;
-    adev->smc_wreg = &vi_smc_wreg;
+    if (adev->flags & AMDGPU_IS_APU) {
+        adev->smc_rreg = &cz_smc_rreg;
+        adev->smc_wreg = &cz_smc_wreg;
+    } else {
+        adev->smc_rreg = &vi_smc_rreg;
+        adev->smc_wreg = &vi_smc_wreg;
+    }
     adev->pcie_rreg = &vi_pcie_rreg;
     adev->pcie_wreg = &vi_pcie_wreg;
     adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
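
Carrizo routes SMC register access through a different index/data pair (mmMP0PUB_IND_INDEX/mmMP0PUB_IND_DATA at 0x180/0x181), so vi_common_early_init() now installs cz_smc_rreg()/cz_smc_wreg() on APUs. The indexed-access pattern itself, write the offset to the index register and then touch the data register, all under one lock, in a hedged userspace sketch (an array stands in for MMIO, a mutex for the spinlock):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Fake MMIO space; the index/data pair mirrors the 0x180/0x181 offsets. */
#define IND_INDEX 0x180
#define IND_DATA  0x181

static uint32_t mmio[0x200];
static uint32_t smc_space[0x100];          /* registers behind the window */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static void wreg(uint32_t reg, uint32_t v)
{
    if (reg == IND_DATA)
        smc_space[mmio[IND_INDEX] & 0xff] = v;    /* indirect write */
    else
        mmio[reg] = v;
}

static uint32_t rreg(uint32_t reg)
{
    if (reg == IND_DATA)
        return smc_space[mmio[IND_INDEX] & 0xff]; /* indirect read */
    return mmio[reg];
}

/* The index and data accesses must stay paired, hence the lock. */
static uint32_t smc_rreg(uint32_t reg)
{
    uint32_t r;

    pthread_mutex_lock(&idx_lock);
    wreg(IND_INDEX, reg);
    r = rreg(IND_DATA);
    pthread_mutex_unlock(&idx_lock);
    return r;
}

static void smc_wreg(uint32_t reg, uint32_t v)
{
    pthread_mutex_lock(&idx_lock);
    wreg(IND_INDEX, reg);
    wreg(IND_DATA, v);
    pthread_mutex_unlock(&idx_lock);
}

int main(void)
{
    smc_wreg(0x42, 0xdeadbeef);
    printf("0x%08x\n", (unsigned)smc_rreg(0x42));
    return 0;
}
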
drivers/gpu/drm/radeon/ci_dpm.c

@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
             tmp |= DPM_ENABLED;
             break;
         default:
-            DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+            DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
             break;
         }
         WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
drivers/gpu/drm/radeon/radeon_gart.c

@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
             }
         }
     }
-    mb();
-    radeon_gart_tlb_flush(rdev);
+    if (rdev->gart.ptr) {
+        mb();
+        radeon_gart_tlb_flush(rdev);
+    }
 }

 /**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
             page_base += RADEON_GPU_PAGE_SIZE;
         }
     }
-    mb();
-    radeon_gart_tlb_flush(rdev);
+    if (rdev->gart.ptr) {
+        mb();
+        radeon_gart_tlb_flush(rdev);
+    }
     return 0;
 }

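
Both radeon_gart_unbind() and radeon_gart_bind() now skip the barrier and TLB flush when rdev->gart.ptr is NULL, i.e. when no CPU-visible GART table has been allocated and there is nothing to flush against. The guard in a tiny sketch with hypothetical names:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for rdev->gart: a CPU mapping of the page table, or NULL. */
struct gart {
    unsigned int *ptr;
};

static void tlb_flush(void)
{
    puts("GART TLB flushed");
}

static void gart_update(struct gart *gart)
{
    /* ... page table entries would be written here ... */

    if (gart->ptr) {
        __sync_synchronize();   /* compiler/CPU barrier standing in for mb() */
        tlb_flush();            /* only flush when a table actually exists */
    }
}

int main(void)
{
    unsigned int table[16] = { 0 };
    struct gart without = { .ptr = NULL };
    struct gart with = { .ptr = table };

    gart_update(&without);   /* no output: nothing to flush */
    gart_update(&with);      /* flushes */
    return 0;
}
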
drivers/gpu/drm/radeon/radeon_gem.c

@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
     if (robj) {
         if (robj->gem_base.import_attach)
             drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+        radeon_mn_unregister(robj);
         radeon_bo_unref(&robj);
     }
 }
drivers/gpu/drm/radeon/radeon_object.c

@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
     bo = container_of(tbo, struct radeon_bo, tbo);

     radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
-    radeon_mn_unregister(bo);

     mutex_lock(&bo->rdev->gem.mutex);
     list_del_init(&bo->list);
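
For the user-ptr race, radeon_mn_unregister() moves out of radeon_ttm_bo_destroy() (the final-free callback) and into radeon_gem_object_free(), so the MMU-notifier hook is torn down while the object is still fully alive, before the reference that may free it is dropped. The general ordering, unregister asynchronous callbacks first and only then release the last reference, in a hedged sketch with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical object with an asynchronous callback registered against it. */
struct object {
    int refcount;
    int callback_registered;
};

static void callback_unregister(struct object *obj)
{
    obj->callback_registered = 0;
    puts("callback unregistered");
}

static void object_unref(struct object *obj)
{
    if (--obj->refcount == 0) {
        /* By now no callback may still reference the object. */
        if (obj->callback_registered)
            puts("BUG: freed with callback still registered");
        puts("object freed");
        free(obj);
    }
}

/* Teardown: drop the callback while the object is still fully alive,
 * then release the reference that may free it. */
static void object_destroy(struct object *obj)
{
    callback_unregister(obj);
    object_unref(obj);
}

int main(void)
{
    struct object *obj = malloc(sizeof(*obj));

    obj->refcount = 1;
    obj->callback_registered = 1;
    object_destroy(obj);
    return 0;
}
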
drivers/gpu/drm/radeon/si_dpm.c

@@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
     /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
     { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
     { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+    { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
     { 0, 0, 0, 0 },
 };

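
The new entry matches the Sapphire Radeon R9 270X 2GB GDDR5 named in the shortlog (device 0x6810, subsystem 0x174b:0xe271) and adds clock caps of 85000/90000; the table is keyed on PCI IDs and ends with a zeroed sentinel. A standalone sketch of that lookup, with a hypothetical struct mirroring the entry layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical quirk entry mirroring the shape of the table above:
 * PCI vendor/device, subsystem vendor/device, then two clock caps. */
struct dpm_quirk {
    uint16_t vendor, device, subvendor, subdevice;
    uint32_t max_sclk, max_mclk;
};

#define PCI_VENDOR_ID_ATI 0x1002

static const struct dpm_quirk quirk_list[] = {
    { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0,     120000 },
    { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0,     120000 },
    { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000  },
    { 0, 0, 0, 0, 0, 0 },   /* zeroed sentinel terminates the table */
};

static const struct dpm_quirk *find_quirk(uint16_t vendor, uint16_t device,
                                          uint16_t subvendor, uint16_t subdevice)
{
    for (const struct dpm_quirk *q = quirk_list; q->vendor; q++) {
        if (q->vendor == vendor && q->device == device &&
            q->subvendor == subvendor && q->subdevice == subdevice)
            return q;
    }
    return NULL;
}

int main(void)
{
    const struct dpm_quirk *q = find_quirk(0x1002, 0x6810, 0x174b, 0xe271);

    if (q)
        printf("caps: sclk %u, mclk %u\n",
               (unsigned)q->max_sclk, (unsigned)q->max_mclk);
    return 0;
}
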