drm fixes for 6.1-rc3

sched:
 - Stop leaking fences when killing a sched entity.
 
 aperture:
 - Avoid uninitialized read in aperture_remove_conflicting_pci_device()
 
 bridge:
 - Fix HPD on bridge/ps8640.
 
 msm:
 - Fix shrinker deadlock
 - Fix crash during suspend after unbind
 - Fix IRQ lifetime issues
 - Fix potential memory corruption with too many bridges
 - Fix memory corruption on GPU state capture
 
 amdgpu:
 - Stable pstate fix
 - SMU 13.x updates
 - SR-IOV fixes
 - PCI AER fix
 - GC 11.x fixes
 - Display fixes
 - Expose IMU firmware version for debugging
 - Plane modifier fix
 - S0i3 fix
 
 amdkfd:
 - Fix possible memory leak
 - Fix GC 10.x cache info reporting
 
 i915:
 - Extend Wa_1607297627 to Alderlake-P
 - Keep PCI autosuspend control 'on' by default on all dGPU
 - Reset frl trained flag before restarting FRL training
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmNbS0cACgkQDHTzWXnE
 hr6hyA/+IumEuknnfu3b6gniD/ZpJ3Hkak4kTl+RYdgpzsBMS7nIcmSueX2BTJdP
 YEIZ8GzjN6/ECuu8crO9NZEz5ly+7RwRBhNuJ3sH4uYf6LpVzRmJX6Na+77Pm3li
 FuG1IjX9ZPtGGmvzZ55iXlMDk1lHfVQ3fzPCRe16J0Ze8LVw1HOuDxj6GkOhA1YJ
 oMCFbzt4DfSSGxStR5xi91DmL8aOL+y1BXeEf4xT6Ec79XYq+akNfB+9Zj0EFt5I
 CDZyAPI1BtRi+QCrxAYw/h7w84RreReOMQq5iW43wSZmxBosj7gr6EO2ZJJO8wie
 fsvShC6bubCaElrMY78UKk7WgFqLFKH+/yeooAlI7R6xNbpkxIboJfCjLrb/5ggM
 JMltFAQBkT+TaOcUYPdbt+p35O2t2JM0fTWBevqNNQk3X3kvW9dkO4mZ6kchKFsE
 CzZKu1pLdcMB26XH0e0ANgxnI42eW9uONrHs4dZgiLr72tyOIyp7v19tN8JHnI2g
 UBuUmHN9hPIde8Cmja5apnqHhTstN6dbbeKwOSlZfwrKmPstZHr9tUAAueeP6MFy
 lbn+ZiSABdjemMi4+P5ou0U1Q9jUONjO7yWZWkxSgiUvqmq4RiVpVHmLSc7Y8hlr
 UCIAUvyIgr/e1ySaeJjsNQzTjizHvYtW1f2UP/m23U4P0ZfHqYk=
 =UZ/k
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2022-10-28' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regularly scheduled fixes for drm, live from a Red Hat office for the
  first time in a while.

  The core has two fixes: one for a scheduler fence leak and one for an
  uninitialized read in the aperture helpers.

  Otherwise there is a single bridge fix, and msm, amdgpu/amdkfd and
  i915 each have a set of fixes.

  sched:
   - Stop leaking fences when killing a sched entity.

  aperture:
   - Avoid uninitialized read in aperture_remove_conflicting_pci_device()

  bridge:
   - Fix HPD on bridge/ps8640.

  msm:
   - Fix shrinker deadlock
   - Fix crash during suspend after unbind
   - Fix IRQ lifetime issues
   - Fix potential memory corruption with too many bridges
   - Fix memory corruption on GPU state capture

  amdgpu:
   - Stable pstate fix
   - SMU 13.x updates
   - SR-IOV fixes
   - PCI AER fix
   - GC 11.x fixes
   - Display fixes
   - Expose IMU firmware version for debugging
   - Plane modifier fix
   - S0i3 fix

  amdkfd:
   - Fix possible memory leak
   - Fix GC 10.x cache info reporting

  i915:
   - Extend Wa_1607297627 to Alderlake-P
   - Keep PCI autosuspend control 'on' by default on all dGPU
   - Reset frl trained flag before restarting FRL training"

* tag 'drm-fixes-2022-10-28' of git://anongit.freedesktop.org/drm/drm: (39 commits)
  fbdev/core: Avoid uninitialized read in aperture_remove_conflicting_pci_device()
  drm/amdgpu: disallow gfxoff until GC IP blocks complete s2idle resume
  drm/scheduler: fix fence ref counting
  drm/amd/display: Revert logic for plane modifiers
  drm/amdkfd: correct the cache info for gfx1036
  drm/amdkfd: update gfx1037 Lx cache setting
  drm/amdgpu: skip mes self test for gc 11.0.3 in recover
  drm/amd: Add IMU fw version to fw version queries
  drm/amd/display: Don't return false if no stream
  drm/amd/display: Remove wrong pipe control lock
  drm/amd/pm: allow gfxoff on gc_11_0_3
  drm/amdkfd: Fix memory leak in kfd_mem_dmamap_userptr()
  drm/amdgpu: Remove ATC L2 access for MMHUB 2.1.x
  drm/i915/dp: Reset frl trained flag before restarting FRL training
  drm/i915/dgfx: Keep PCI autosuspend control 'on' by default on all dGPU
  drm/i915: Extend Wa_1607297627 to Alderlake-P
  drm/amdgpu: Adjust MES polling timeout for sriov
  drm/amd/pm: update driver-if header for smu_v13_0_10
  drm/amdgpu: fix pstate setting issue
  drm/bridge: ps8640: Add back the 50 ms mystery delay after HPD
  ...
Linus Torvalds 2022-10-28 12:10:43 -07:00
commit e3493d6825
41 changed files with 421 additions and 161 deletions


@ -510,13 +510,13 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem,
struct ttm_tt *ttm = bo->tbo.ttm;
int ret;
if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
return -EINVAL;
ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
if (unlikely(!ttm->sg))
return -ENOMEM;
if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
return -EINVAL;
/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
ttm->num_pages, 0,


@ -326,7 +326,10 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
if (r)
return r;
ctx->stable_pstate = current_stable_pstate;
if (mgr->adev->pm.stable_pstate_ctx)
ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
else
ctx->stable_pstate = current_stable_pstate;
return 0;
}


@ -3210,6 +3210,15 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
return r;
}
adev->ip_blocks[i].status.hw = true;
if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
/* disable gfxoff for IP resume. The gfxoff will be re-enabled in
* amdgpu_device_resume() after IP resume.
*/
amdgpu_gfx_off_ctrl(adev, false);
DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
}
}
return 0;
@ -4185,6 +4194,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
if (adev->in_s0ix) {
/* re-enable gfxoff after IP resume. This re-enables gfxoff after
* it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
*/
amdgpu_gfx_off_ctrl(adev, true);
DRM_DEBUG("will enable gfxoff for the mission mode\n");
}
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
@ -5381,7 +5397,7 @@ skip_hw_reset:
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
}
if (adev->enable_mes)
if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
amdgpu_mes_self_test(tmp_adev);
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {


@ -344,6 +344,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->mes.ucode_fw_version[1];
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_IMU:
fw_info->ver = adev->gfx.imu_fw_version;
fw_info->feature = 0;
break;
default:
return -EINVAL;
}
@ -1520,6 +1524,15 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
fw_info.feature, fw_info.ver);
}
/* IMU */
query_fw.fw_type = AMDGPU_INFO_FW_IMU;
query_fw.index = 0;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* PSP SOS */
query_fw.fw_type = AMDGPU_INFO_FW_SOS;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);


@ -698,6 +698,7 @@ FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(imu_fw_version, 0444, gfx.imu_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
@ -719,7 +720,8 @@ static struct attribute *fw_attrs[] = {
&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
&dev_attr_dmcu_fw_version.attr, NULL
&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
NULL
};
static const struct attribute_group fw_attr_group = {


@ -547,6 +547,7 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU, adev->gfx.imu_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
adev->psp.asd_context.bin_desc.fw_version);


@ -70,6 +70,7 @@ enum amd_sriov_ucode_engine_id {
AMD_SRIOV_UCODE_ID_RLC_SRLS,
AMD_SRIOV_UCODE_ID_MEC,
AMD_SRIOV_UCODE_ID_MEC2,
AMD_SRIOV_UCODE_ID_IMU,
AMD_SRIOV_UCODE_ID_SOS,
AMD_SRIOV_UCODE_ID_ASD,
AMD_SRIOV_UCODE_ID_TA_RAS,


@ -5051,6 +5051,7 @@ static int gfx_v11_0_set_powergating_state(void *handle,
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
amdgpu_gfx_off_ctrl(adev, enable);
break;
case IP_VERSION(11, 0, 1):


@ -98,7 +98,14 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
unsigned long flags;
signed long timeout = adev->usec_timeout;
if (amdgpu_emu_mode) {
timeout *= 100;
} else if (amdgpu_sriov_vf(adev)) {
/* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */
timeout = 15 * 600 * 1000;
}
BUG_ON(size % 4 != 0);
spin_lock_irqsave(&mes->ring_lock, flags);
@ -118,7 +125,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);
r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq,
adev->usec_timeout * (amdgpu_emu_mode ? 100 : 1));
timeout);
if (r < 1) {
DRM_ERROR("MES failed to response msg=%d\n",
x_pkt->header.opcode);
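
For reference, the worst case quoted in the comment above works out to 15 VFs x 600 ms = 9,000 ms; assuming the timeout is in microseconds, as its initialisation from adev->usec_timeout suggests, that is the 15 * 600 * 1000 value used in the hunk.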


@ -32,8 +32,6 @@
#include "gc/gc_10_1_0_offset.h"
#include "soc15_common.h"
#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid 0x064d
#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid_BASE_IDX 0
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0
@ -574,7 +572,6 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 1, 2):
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
break;
default:
@ -608,8 +605,6 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 1, 2):
if (def != data)
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
if (def1 != data1)
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1);
break;
@ -634,8 +629,8 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 1, 2):
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
break;
/* There is no ATCL2 in MMHUB for 2.1.x */
return;
default:
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
break;
@ -646,18 +641,8 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
else
data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
if (def != data) {
switch (adev->ip_versions[MMHUB_HWIP][0]) {
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 1, 2):
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
break;
default:
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
break;
}
}
if (def != data)
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
}
static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
@ -695,7 +680,10 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 1, 2):
data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
/* There is no ATCL2 in MMHUB for 2.1.x. Keep the status
* based on DAGB
*/
data = MM_ATC_L2_MISC_CG__ENABLE_MASK;
data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
break;
default:


@ -795,6 +795,102 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
},
};
static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 256,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
};
static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 256,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
};
static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
@ -1514,11 +1610,17 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
break;
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */
case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */
pcache_info = yellow_carp_cache_info;
num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
break;
case IP_VERSION(10, 3, 6):
pcache_info = gc_10_3_6_cache_info;
num_of_cache_types = ARRAY_SIZE(gc_10_3_6_cache_info);
break;
case IP_VERSION(10, 3, 7):
pcache_info = gfx1037_cache_info;
num_of_cache_types = ARRAY_SIZE(gfx1037_cache_info);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):


@ -1369,7 +1369,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
const struct drm_format_info *info = drm_format_info(format);
struct hw_asic_id asic_id = adev->dm.dc->ctx->asic_id;
int i;
enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
@ -1386,49 +1386,13 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
return true;
}
/* check if swizzle mode is supported by this version of DCN */
switch (asic_id.chip_family) {
case FAMILY_SI:
case FAMILY_CI:
case FAMILY_KV:
case FAMILY_CZ:
case FAMILY_VI:
/* asics before AI does not have modifier support */
return false;
case FAMILY_AI:
case FAMILY_RV:
case FAMILY_NV:
case FAMILY_VGH:
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_10_3_7:
switch (AMD_FMT_MOD_GET(TILE, modifier)) {
case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
case AMD_FMT_MOD_TILE_GFX9_64K_D:
return true;
default:
return false;
}
break;
case AMDGPU_FAMILY_GC_11_0_0:
case AMDGPU_FAMILY_GC_11_0_1:
switch (AMD_FMT_MOD_GET(TILE, modifier)) {
case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
case AMD_FMT_MOD_TILE_GFX9_64K_D:
return true;
default:
return false;
}
break;
default:
ASSERT(0); /* Unknown asic */
break;
/* Check that the modifier is on the list of the plane's supported modifiers. */
for (i = 0; i < plane->modifier_count; i++) {
if (modifier == plane->modifiers[i])
break;
}
if (i == plane->modifier_count)
return false;
/*
* For D swizzle the canonical modifier depends on the bpp, so check


@ -1270,16 +1270,6 @@ void dcn20_pipe_control_lock(
lock,
&hw_locks,
&inst_flags);
} else if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
hw_lock_cmd.bits.lock_pipe = 1;
hw_lock_cmd.bits.otg_inst = pipe->stream_res.tg->inst;
hw_lock_cmd.bits.lock = lock;
if (!lock)
hw_lock_cmd.bits.should_release = 1;
dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
} else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
if (lock)
pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
@ -1856,7 +1846,7 @@ void dcn20_post_unlock_program_front_end(
for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
mdelay(1);
udelay(1);
}
}


@ -200,7 +200,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
return false;
continue;
if (!pipe->plane_state)
return false;


@ -25,7 +25,7 @@
#define SMU13_DRIVER_IF_V13_0_0_H
//Increment this version if SkuTable_t or BoardTable_t change
#define PPTABLE_VERSION 0x24
#define PPTABLE_VERSION 0x26
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@ -109,6 +109,22 @@
#define FEATURE_SPARE_63_BIT 63
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
(1 << FEATURE_DPM_UCLK_BIT) | \
(1 << FEATURE_DPM_FCLK_BIT) | \
(1 << FEATURE_DPM_SOCCLK_BIT) | \
(1 << FEATURE_DPM_MP0CLK_BIT) | \
(1 << FEATURE_DPM_LINK_BIT) | \
(1 << FEATURE_DPM_DCN_BIT) | \
(1 << FEATURE_DS_GFXCLK_BIT) | \
(1 << FEATURE_DS_SOCCLK_BIT) | \
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
(1 << FEATURE_DS_UCLK_BIT))
//For use with feature control messages
typedef enum {
FEATURE_PWR_ALL,
@ -133,6 +149,7 @@ typedef enum {
#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200
#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400
#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800
#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@ -262,15 +279,15 @@ typedef enum {
} I2cControllerPort_e;
typedef enum {
I2C_CONTROLLER_NAME_VR_GFX = 0,
I2C_CONTROLLER_NAME_VR_SOC,
I2C_CONTROLLER_NAME_VR_VMEMP,
I2C_CONTROLLER_NAME_VR_VDDIO,
I2C_CONTROLLER_NAME_LIQUID0,
I2C_CONTROLLER_NAME_LIQUID1,
I2C_CONTROLLER_NAME_PLX,
I2C_CONTROLLER_NAME_OTHER,
I2C_CONTROLLER_NAME_COUNT,
I2C_CONTROLLER_NAME_VR_GFX = 0,
I2C_CONTROLLER_NAME_VR_SOC,
I2C_CONTROLLER_NAME_VR_VMEMP,
I2C_CONTROLLER_NAME_VR_VDDIO,
I2C_CONTROLLER_NAME_LIQUID0,
I2C_CONTROLLER_NAME_LIQUID1,
I2C_CONTROLLER_NAME_PLX,
I2C_CONTROLLER_NAME_FAN_INTAKE,
I2C_CONTROLLER_NAME_COUNT,
} I2cControllerName_e;
typedef enum {
@ -282,16 +299,17 @@ typedef enum {
I2C_CONTROLLER_THROTTLER_LIQUID0,
I2C_CONTROLLER_THROTTLER_LIQUID1,
I2C_CONTROLLER_THROTTLER_PLX,
I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
I2C_CONTROLLER_THROTTLER_INA3221,
I2C_CONTROLLER_THROTTLER_COUNT,
} I2cControllerThrottler_e;
typedef enum {
I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
I2C_CONTROLLER_PROTOCOL_VR_IR35217,
I2C_CONTROLLER_PROTOCOL_TMP_TMP102A,
I2C_CONTROLLER_PROTOCOL_INA3221,
I2C_CONTROLLER_PROTOCOL_COUNT,
I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
I2C_CONTROLLER_PROTOCOL_VR_IR35217,
I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
I2C_CONTROLLER_PROTOCOL_INA3221,
I2C_CONTROLLER_PROTOCOL_COUNT,
} I2cControllerProtocol_e;
typedef struct {
@ -658,13 +676,20 @@ typedef struct {
#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1
typedef enum {
FAN_MODE_AUTO = 0,
FAN_MODE_MANUAL_LINEAR,
} FanMode_e;
typedef struct {
uint32_t FeatureCtrlMask;
//Voltage control
int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
uint16_t reserved[2];
uint16_t VddGfxVmax; // in mV
uint8_t IdlePwrSavingFeaturesCtrl;
uint8_t RuntimePwrSavingFeaturesCtrl;
//Frequency changes
int16_t GfxclkFmin; // MHz
@ -674,7 +699,7 @@ typedef struct {
//PPT
int16_t Ppt; // %
int16_t reserved1;
int16_t Tdc;
//Fan control
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
@ -701,16 +726,19 @@ typedef struct {
uint32_t FeatureCtrlMask;
int16_t VoltageOffsetPerZoneBoundary;
uint16_t reserved[2];
uint16_t VddGfxVmax; // in mV
uint16_t GfxclkFmin; // MHz
uint16_t GfxclkFmax; // MHz
uint8_t IdlePwrSavingFeaturesCtrl;
uint8_t RuntimePwrSavingFeaturesCtrl;
int16_t GfxclkFmin; // MHz
int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
//PPT
int16_t Ppt; // %
int16_t reserved1;
int16_t Tdc;
uint8_t FanLinearPwmPoints;
uint8_t FanLinearTempPoints;
@ -857,7 +885,8 @@ typedef struct {
uint16_t FanStartTempMin;
uint16_t FanStartTempMax;
uint32_t Spare[12];
uint16_t PowerMinPpt0[POWER_SOURCE_COUNT];
uint32_t Spare[11];
} MsgLimits_t;
@ -1041,7 +1070,17 @@ typedef struct {
uint32_t GfxoffSpare[15];
// GFX GPO
uint32_t GfxGpoSpare[16];
uint32_t DfllBtcMasterScalerM;
int32_t DfllBtcMasterScalerB;
uint32_t DfllBtcSlaveScalerM;
int32_t DfllBtcSlaveScalerB;
uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
uint32_t DfllL2FrequencyBoostM; //Unitless (float)
uint32_t DfllL2FrequencyBoostB; //In MHz (integer)
uint32_t GfxGpoSpare[8];
// GFX DCS
@ -1114,12 +1153,14 @@ typedef struct {
uint16_t IntakeTempHighIntakeAcousticLimit;
uint16_t IntakeTempAcouticLimitReleaseRate;
uint16_t FanStalledTempLimitOffset;
int16_t FanAbnormalTempLimitOffset;
uint16_t FanStalledTriggerRpm;
uint16_t FanAbnormalTriggerRpm;
uint16_t FanPadding;
uint16_t FanAbnormalTriggerRpmCoeff;
uint16_t FanAbnormalDetectionEnable;
uint32_t FanSpare[14];
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding[3];
uint32_t FanSpare[13];
// SECTION: VDD_GFX AVFS
@ -1198,8 +1239,13 @@ typedef struct {
int16_t TotalBoardPowerM;
int16_t TotalBoardPowerB;
//PMFW-11158
QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
// SECTION: Sku Reserved
uint32_t Spare[61];
uint32_t Spare[43];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@ -1288,8 +1334,11 @@ typedef struct {
uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
uint8_t FuseWritePowerMuxPresent;
uint8_t FuseWritePadding[3];
// SECTION: Board Reserved
uint32_t BoardSpare[64];
uint32_t BoardSpare[63];
// SECTION: Structure Padding
@ -1381,7 +1430,7 @@ typedef struct {
uint16_t AverageTotalBoardPower;
uint16_t AvgTemperature[TEMP_COUNT];
uint16_t TempPadding;
uint16_t AvgTemperatureFanIntake;
uint8_t PcieRate ;
uint8_t PcieWidth ;
@ -1550,5 +1599,7 @@ typedef struct {
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
#endif


@ -30,7 +30,7 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D


@ -289,7 +289,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
case IP_VERSION(13, 0, 0):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0;
case IP_VERSION(13, 0, 10):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10;
break;
case IP_VERSION(13, 0, 7):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_7;
@ -305,9 +306,6 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case IP_VERSION(13, 0, 5):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
break;
case IP_VERSION(13, 0, 10):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_10;
break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
adev->ip_versions[MP1_HWIP][0]);
@ -842,6 +840,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)


@ -105,6 +105,7 @@ struct ps8640 {
struct gpio_desc *gpio_powerdown;
struct device_link *link;
bool pre_enabled;
bool need_post_hpd_delay;
};
static const struct regmap_config ps8640_regmap_config[] = {
@ -173,14 +174,31 @@ static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wai
{
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
int status;
int ret;
/*
* Apparently something about the firmware in the chip signals that
* HPD goes high by reporting GPIO9 as high (even though HPD isn't
* actually connected to GPIO9).
*/
return regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
status & PS_GPIO9, wait_us / 10, wait_us);
ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
status & PS_GPIO9, wait_us / 10, wait_us);
/*
* The first time we see HPD go high after a reset we delay an extra
* 50 ms. The best guess is that the MCU is doing "stuff" during this
* time (maybe talking to the panel) and we don't want to interrupt it.
*
* No locking is done around "need_post_hpd_delay". If we're here we
* know we're holding a PM Runtime reference and the only other place
* that touches this is PM Runtime resume.
*/
if (!ret && ps_bridge->need_post_hpd_delay) {
ps_bridge->need_post_hpd_delay = false;
msleep(50);
}
return ret;
}
static int ps8640_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
@ -381,6 +399,9 @@ static int __maybe_unused ps8640_resume(struct device *dev)
msleep(50);
gpiod_set_value(ps_bridge->gpio_reset, 0);
/* We just reset things, so we need a delay after the first HPD */
ps_bridge->need_post_hpd_delay = true;
/*
* Mystery 200 ms delay for the "MCU to be ready". It's unclear if
* this is truly necessary since the MCU will already signal that

View File

@ -3957,6 +3957,8 @@ intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
intel_dp->frl.is_trained = false;
/* Restart FRL training or fall back to TMDS mode */
intel_dp_check_frl_training(intel_dp);
}


@ -2293,11 +2293,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
/*
* Wa_1607030317:tgl
* Wa_1607186500:tgl
* Wa_1607297627:tgl,rkl,dg1[a0]
* Wa_1607297627:tgl,rkl,dg1[a0],adlp
*
* On TGL and RKL there are multiple entries for this WA in the
* BSpec; some indicate this is an A0-only WA, others indicate


@ -591,8 +591,15 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
pm_runtime_use_autosuspend(kdev);
}
/* Enable by default */
pm_runtime_allow(kdev);
/*
* FIXME: Temp hammer to keep autosupend disable on lmem supported platforms.
* As per PCIe specs 5.3.1.4.1, all iomem read write request over a PCIe
* function will be unsupported in case PCIe endpoint function is in D3.
* Let's keep i915 autosuspend control 'on' till we fix all known issue
* with lmem access in D3.
*/
if (!IS_DGFX(i915))
pm_runtime_allow(kdev);
/*
* The core calls the driver load handler with an RPM reference held.


@ -155,7 +155,7 @@ config DRM_MSM_HDMI
Compile in support for the HDMI output MSM DRM driver. It can
be a primary or a secondary display on device. Note that this is used
only for the direct HDMI output. If the device outputs HDMI data
throught some kind of DSI-to-HDMI bridge, this option can be disabled.
through some kind of DSI-to-HDMI bridge, this option can be disabled.
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"


@ -91,7 +91,7 @@ struct a6xx_state_memobj {
static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
{
struct a6xx_state_memobj *obj =
kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
@ -813,6 +813,9 @@ static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
{
struct msm_gpu_state_bo *snapshot;
if (!bo->size)
return NULL;
snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
if (!snapshot)
return NULL;
@ -1040,8 +1043,13 @@ static void a6xx_gpu_state_destroy(struct kref *kref)
if (a6xx_state->gmu_hfi)
kvfree(a6xx_state->gmu_hfi->data);
list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
kfree(obj);
if (a6xx_state->gmu_debug)
kvfree(a6xx_state->gmu_debug->data);
list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
list_del(&obj->node);
kvfree(obj);
}
adreno_gpu_state_destroy(state);
kfree(a6xx_state);


@ -679,6 +679,9 @@ static int adreno_system_suspend(struct device *dev)
struct msm_gpu *gpu = dev_to_gpu(dev);
int remaining, ret;
if (!gpu)
return 0;
suspend_scheduler(gpu);
remaining = wait_event_timeout(gpu->retire_event,
@ -700,7 +703,12 @@ out:
static int adreno_system_resume(struct device *dev)
{
resume_scheduler(dev_to_gpu(dev));
struct msm_gpu *gpu = dev_to_gpu(dev);
if (!gpu)
return 0;
resume_scheduler(gpu);
return pm_runtime_force_resume(dev);
}


@ -729,7 +729,12 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
return buf;
}
/* len is expected to be in bytes */
/* len is expected to be in bytes
*
* WARNING: *ptr should be allocated with kvmalloc or friends. It can be free'd
* with kvfree() and replaced with a newly kvmalloc'd buffer on the first call
* when the unencoded raw data is encoded
*/
void adreno_show_object(struct drm_printer *p, void **ptr, int len,
bool *encoded)
{


@ -56,8 +56,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
return ret;
}
static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
static enum drm_mode_status
mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);


@ -1243,8 +1243,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
{
int ret = 0;
const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding = DP_SET_ANSI_8B10B;
u8 ssc;
u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
u8 assr;
struct dp_link_info link_info = {0};
@ -1256,13 +1255,11 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
dp_aux_link_configure(ctrl->aux, &link_info);
if (drm_dp_max_downspread(dpcd)) {
ssc = DP_SPREAD_AMP_0_5;
drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, &ssc, 1);
}
if (drm_dp_max_downspread(dpcd))
encoding[0] |= DP_SPREAD_AMP_0_5;
drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
&encoding, 1);
/* config DOWNSPREAD_CTRL and MAIN_LINK_CHANNEL_CODING_SET */
drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, encoding, 2);
if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;


@ -1249,7 +1249,7 @@ int dp_display_request_irq(struct msm_dp *dp_display)
return -EINVAL;
}
rc = devm_request_irq(&dp->pdev->dev, dp->irq,
rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq,
dp_display_irq_handler,
IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
if (rc < 0) {
@ -1528,6 +1528,11 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
}
}
static void of_dp_aux_depopulate_bus_void(void *data)
{
of_dp_aux_depopulate_bus(data);
}
static int dp_display_get_next_bridge(struct msm_dp *dp)
{
int rc;
@ -1552,10 +1557,16 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
* panel driver is probed asynchronously but is the best we
* can do without a bigger driver reorganization.
*/
rc = devm_of_dp_aux_populate_ep_devices(dp_priv->aux);
rc = of_dp_aux_populate_bus(dp_priv->aux, NULL);
of_node_put(aux_bus);
if (rc)
goto error;
rc = devm_add_action_or_reset(dp->drm_dev->dev,
of_dp_aux_depopulate_bus_void,
dp_priv->aux);
if (rc)
goto error;
} else if (dp->is_edp) {
DRM_ERROR("eDP aux_bus not found\n");
return -ENODEV;
@ -1568,7 +1579,7 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
* For DisplayPort interfaces external bridges are optional, so
* silently ignore an error if one is not present (-ENODEV).
*/
rc = dp_parser_find_next_bridge(dp_priv->parser);
rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser);
if (!dp->is_edp && rc == -ENODEV)
return 0;
@ -1597,6 +1608,12 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
return -EINVAL;
priv = dev->dev_private;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
dp_display->drm_dev = dev;
dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
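
As an aside, here is a minimal sketch of the devm pattern the aux-bus hunk above relies on (all names below are hypothetical, not the msm code): devm_add_action_or_reset() takes a void (*)(void *) callback, so the typed teardown helper gets a small shim, and because the action is run immediately when registration fails, the error path needs no manual cleanup.

#include <linux/device.h>

struct my_resource { int id; };			/* hypothetical resource */

static void my_teardown(struct my_resource *res)
{
	/* undo whatever my_setup() acquired */
}

/* Shim: devm_add_action_or_reset() expects a void (*)(void *). */
static void my_teardown_void(void *data)
{
	my_teardown(data);
}

static int my_setup(struct device *dev, struct my_resource *res)
{
	/* ...acquire the resource here... */

	/* Runs my_teardown_void(res) at driver detach, or right away on failure. */
	return devm_add_action_or_reset(dev, my_teardown_void, res);
}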


@ -31,6 +31,36 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
connector_status_disconnected;
}
static int dp_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct msm_dp *dp;
dp = to_dp_bridge(bridge)->dp_display;
drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
(dp->is_connected) ? "true" : "false");
/*
* There is no protection in the DRM framework to check if the display
* pipeline has been already disabled before trying to disable it again.
* Hence if the sink is unplugged, the pipeline gets disabled, but the
* crtc->active is still true. Any attempt to set the mode or manually
* disable this encoder will result in the crash.
*
* TODO: add support for telling the DRM subsystem that the pipeline is
* disabled by the hardware and thus all access to it should be forbidden.
* After that this piece of code can be removed.
*/
if (bridge->ops & DRM_BRIDGE_OP_HPD)
return (dp->is_connected) ? 0 : -ENOTCONN;
return 0;
}
/**
* dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
* @bridge: Poiner to drm bridge
@ -61,6 +91,9 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
}
static const struct drm_bridge_funcs dp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.enable = dp_bridge_enable,
.disable = dp_bridge_disable,
.post_disable = dp_bridge_post_disable,
@ -68,6 +101,7 @@ static const struct drm_bridge_funcs dp_bridge_ops = {
.mode_valid = dp_bridge_mode_valid,
.get_modes = dp_bridge_get_modes,
.detect = dp_bridge_detect,
.atomic_check = dp_bridge_atomic_check,
};
struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,


@ -240,12 +240,12 @@ static int dp_parser_clock(struct dp_parser *parser)
return 0;
}
int dp_parser_find_next_bridge(struct dp_parser *parser)
int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser)
{
struct device *dev = &parser->pdev->dev;
struct platform_device *pdev = parser->pdev;
struct drm_bridge *bridge;
bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
bridge = devm_drm_of_get_bridge(dev, pdev->dev.of_node, 1, 0);
if (IS_ERR(bridge))
return PTR_ERR(bridge);


@ -138,8 +138,9 @@ struct dp_parser {
struct dp_parser *dp_parser_get(struct platform_device *pdev);
/**
* dp_parser_find_next_bridge() - find an additional bridge to DP
* devm_dp_parser_find_next_bridge() - find an additional bridge to DP
*
* @dev: device to tie bridge lifetime to
* @parser: dp_parser data from client
*
* This function is used to find any additional bridge attached to
@ -147,6 +148,6 @@ struct dp_parser *dp_parser_get(struct platform_device *pdev);
*
* Return: 0 if able to get the bridge, otherwise negative errno for failure.
*/
int dp_parser_find_next_bridge(struct dp_parser *parser);
int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser);
#endif


@ -218,6 +218,12 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return -EINVAL;
priv = dev->dev_private;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);


@ -300,6 +300,11 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
struct platform_device *pdev = hdmi->pdev;
int ret;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
hdmi->dev = dev;
hdmi->encoder = encoder;
@ -339,7 +344,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
ret = devm_request_irq(&pdev->dev, hdmi->irq,
ret = devm_request_irq(dev->dev, hdmi->irq,
msm_hdmi_irq, IRQF_TRIGGER_HIGH,
"hdmi_isr", hdmi);
if (ret < 0) {


@ -247,6 +247,7 @@ static int msm_drm_uninit(struct device *dev)
for (i = 0; i < priv->num_bridges; i++)
drm_bridge_remove(priv->bridges[i]);
priv->num_bridges = 0;
pm_runtime_get_sync(dev);
msm_irq_uninstall(ddev);


@ -501,11 +501,11 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED;
unsigned cleanup_flags = BO_LOCKED;
unsigned i;
if (error)
cleanup_flags |= BO_VMA_PINNED;
cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@ -706,7 +706,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit = NULL;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
@ -946,8 +946,7 @@ out_unlock:
put_unused_fd(out_fence_fd);
mutex_unlock(&queue->lock);
out_post_unlock:
if (submit)
msm_gem_submit_put(submit);
msm_gem_submit_put(submit);
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);


@ -997,4 +997,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
}
msm_devfreq_cleanup(gpu);
platform_set_drvdata(gpu->pdev, NULL);
}


@ -280,6 +280,10 @@ struct msm_gpu {
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
if (!adreno_smmu)
return NULL;
return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
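
The hunk above works together with the msm_gpu_cleanup() and adreno_system_suspend()/resume() changes elsewhere in this pull; a hedged sketch of the pattern (hypothetical driver, not the msm code): clear drvdata at teardown and let the PM callbacks bail out once it is gone, so a suspend arriving after unbind cannot dereference a stale pointer.

#include <linux/platform_device.h>

struct my_gpu { int id; };			/* hypothetical */

static int my_system_suspend(struct device *dev)
{
	struct my_gpu *gpu = dev_get_drvdata(dev);

	if (!gpu)		/* device already unbound: nothing to quiesce */
		return 0;

	/* ...park the scheduler, wait for in-flight work... */
	return 0;
}

static void my_unbind(struct platform_device *pdev)
{
	/* ...free the GPU object... */
	platform_set_drvdata(pdev, NULL);	/* later PM callbacks now see NULL */
}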


@ -25,7 +25,8 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_gem_lock(obj);
msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
submit->bos[i].flags &= ~BO_VMA_PINNED;
msm_gem_unpin_locked(obj);
submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED);
msm_gem_unlock(obj);
}


@ -207,6 +207,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
dma_fence_put(f);
INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
schedule_work(&job->work);
}
@ -234,8 +235,10 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
/* Wait for all dependencies to avoid data corruptions */
while ((f = drm_sched_job_dependency(job, entity)))
while ((f = drm_sched_job_dependency(job, entity))) {
dma_fence_wait(f, false);
dma_fence_put(f);
}
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
@ -250,6 +253,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
continue;
}
dma_fence_get(entity->last_scheduled);
r = dma_fence_add_callback(entity->last_scheduled,
&job->finish_cb,
drm_sched_entity_kill_jobs_cb);


@ -340,12 +340,9 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na
size = pci_resource_len(pdev, bar);
ret = aperture_remove_conflicting_devices(base, size, primary, name);
if (ret)
break;
return ret;
}
if (ret)
return ret;
/*
* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over.
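
The shape of the bug this hunk removes, as a small standalone sketch (hypothetical names, not the aperture/fbdev code): when the loop body never runs, the post-loop test reads a variable that was never assigned, so returning from inside the loop, as the fix does, avoids the uninitialised read.

int remove_one(int bar);	/* hypothetical helper */

/* Buggy shape: 'ret' is only written inside the loop, so with zero
 * matching BARs the final "if (ret)" reads an uninitialised value. */
int remove_conflicts_buggy(int nbars)
{
	int ret;
	int bar;

	for (bar = 0; bar < nbars; bar++) {
		ret = remove_one(bar);
		if (ret)
			break;
	}

	if (ret)		/* uninitialised when nbars == 0 */
		return ret;
	return 0;
}

/* Fixed shape: fail from inside the loop; falling out of it means success. */
int remove_conflicts_fixed(int nbars)
{
	int bar;

	for (bar = 0; bar < nbars; bar++) {
		int ret = remove_one(bar);

		if (ret)
			return ret;
	}
	return 0;
}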


@ -763,6 +763,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_MES_KIQ 0x19
/* Subquery id: Query MES firmware version */
#define AMDGPU_INFO_FW_MES 0x1a
/* Subquery id: Query IMU firmware version */
#define AMDGPU_INFO_FW_IMU 0x1b
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f