Merge tag 'amd-drm-fixes-6.9-2024-03-21' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-6.9-2024-03-21:

amdgpu:
- Freesync fixes
- UAF IOCTL fixes
- Fix mmhub client ID mapping
- IH 7.0 fix
- DML2 fixes
- VCN 4.0.6 fix
- GART bind fix
- GPU reset fix
- SR-IOV fix
- OD table handling fixes
- Fix TA handling on boards without display hardware
- DML1 fix
- ABM fix
- eDP panel fix
- DPPCLK fix
- HDCP fix
- Revert incorrect error case handling in ioremap
- VPE fix
- HDMI fixes
- SDMA 4.4.2 fix
- Other misc fixes

amdkfd:
- Fix duplicate BO handling in process restore

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240321220514.1418288-1-alexander.deucher@amd.com
Dave Airlie 2024-03-22 10:33:27 +10:00
commit cafd86cbdc
64 changed files with 456 additions and 215 deletions

@@ -146,7 +146,7 @@ int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
 {
 	int ret;
 
-	if (!adev->kfd.init_complete)
+	if (!adev->kfd.init_complete || adev->kfd.client.dev)
 		return 0;
 
 	ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",

@@ -2869,14 +2869,16 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
 	mutex_lock(&process_info->lock);
 
-	drm_exec_init(&exec, 0, 0);
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		list_for_each_entry(peer_vm, &process_info->vm_list_head,
 				    vm_list_node) {
 			ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
 			drm_exec_retry_on_contention(&exec);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				pr_err("Locking VM PD failed, ret: %d\n", ret);
 				goto ttm_reserve_fail;
+			}
 		}
 
 		/* Reserve all BOs and page tables/directory. Add all BOs from
@@ -2889,8 +2891,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
 			gobj = &mem->bo->tbo.base;
 			ret = drm_exec_prepare_obj(&exec, gobj, 1);
 			drm_exec_retry_on_contention(&exec);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
 				goto ttm_reserve_fail;
+			}
 		}
 	}
@@ -2950,8 +2954,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
 	 * validations above would invalidate DMABuf imports again.
 	 */
 	ret = process_validate_vms(process_info, &exec.ticket);
-	if (ret)
+	if (ret) {
+		pr_debug("Validating VMs failed, ret: %d\n", ret);
 		goto validate_map_fail;
+	}
 
 	/* Update mappings not managed by KFD */
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
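A note on the amdkfd restore fix above: drm_exec_prepare_obj() fails with -EALREADY when asked to lock a GEM object the same drm_exec context already holds, which can happen when a process's BO list contains the same buffer twice (for example an exported and re-imported BO). DRM_EXEC_IGNORE_DUPLICATES turns the duplicate into a no-op instead of an error. A minimal sketch of the locking pattern, with a hypothetical lock_all_bos() helper and caller-supplied objs[] (not the driver's actual code):

#include <drm/drm_exec.h>

static int lock_all_bos(struct drm_gem_object **objs, unsigned int num_objs)
{
	struct drm_exec exec;
	unsigned int i;
	int ret = 0;

	/* DRM_EXEC_IGNORE_DUPLICATES: relocking an already-held object
	 * succeeds instead of returning -EALREADY. */
	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < num_objs; i++) {
			ret = drm_exec_prepare_obj(&exec, objs[i], 1);
			/* on ww-mutex contention, drop all locks and restart */
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}

	/* ... all objects locked: validate, attach fences, etc. ... */
out:
	drm_exec_fini(&exec);
	return ret;
}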

@@ -4040,10 +4040,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * early on during init and before calling to RREG32.
 	 */
 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
-	if (!adev->reset_domain) {
-		r = -ENOMEM;
-		goto unmap_memory;
-	}
+	if (!adev->reset_domain)
+		return -ENOMEM;
 
 	/* detect hw virtualization here */
 	amdgpu_detect_virtualization(adev);
@@ -4053,7 +4051,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_device_get_job_timeout_settings(adev);
 	if (r) {
 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-		goto unmap_memory;
+		return r;
 	}
 
 	amdgpu_device_set_mcbp(adev);
@@ -4061,12 +4059,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	/* early init functions */
 	r = amdgpu_device_ip_early_init(adev);
 	if (r)
-		goto unmap_memory;
+		return r;
 
 	/* Get rid of things like offb */
 	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
 	if (r)
-		goto unmap_memory;
+		return r;
 
 	/* Enable TMZ based on IP_VERSION */
 	amdgpu_gmc_tmz_set(adev);
@@ -4076,7 +4074,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (adev->gmc.xgmi.supported) {
 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
 		if (r)
-			goto unmap_memory;
+			return r;
 	}
 
 	/* enable PCIE atomic ops */
@@ -4345,8 +4343,6 @@ release_ras_con:
 failed:
 	amdgpu_vf_error_trans_all(adev);
 
-unmap_memory:
-	iounmap(adev->rmmio);
 	return r;
 }

@@ -2479,8 +2479,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
 	}
 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
 		adev = mgpu_info.gpu_ins[i].adev;
-		if (!adev->kfd.init_complete)
+		if (!adev->kfd.init_complete) {
+			kgd2kfd_init_zone_device(adev);
 			amdgpu_amdkfd_device_init(adev);
+			amdgpu_amdkfd_drm_client_create(adev);
+		}
 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 }

@@ -687,7 +687,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
 	r = amdgpu_ring_test_helper(kiq_ring);
 	spin_unlock(&kiq->ring_lock);
 	if (r)
-		DRM_ERROR("KCQ enable failed\n");
+		DRM_ERROR("KGQ enable failed\n");
 
 	return r;
 }

@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
  */
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
 {
+	int r;
+
 	if (bo->kfd_bo)
-		return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+		r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
 						    addr, amdgpu_bo_size(bo),
 						    &amdgpu_hmm_hsa_ops);
-	return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
-					    amdgpu_bo_size(bo),
-					    &amdgpu_hmm_gfx_ops);
+	else
+		r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+						 amdgpu_bo_size(bo),
+						 &amdgpu_hmm_gfx_ops);
+	if (r)
+		/*
+		 * Make sure amdgpu_hmm_unregister() doesn't call
+		 * mmu_interval_notifier_remove() when the notifier isn't properly
+		 * initialized.
+		 */
+		bo->notifier.mm = NULL;
+
+	return r;
 }
 
 /**

@@ -1830,6 +1830,10 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
+	/* bypass hdcp initialization if dmu is harvested */
+	if (!amdgpu_device_has_display_hardware(psp->adev))
+		return 0;
+
 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
 	    !psp->hdcp_context.context.bin_desc.start_addr) {
 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
@@ -1862,6 +1866,9 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
+	if (!psp->hdcp_context.context.initialized)
+		return 0;
+
 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
 }
@@ -1897,6 +1904,10 @@ static int psp_dtm_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
+	/* bypass dtm initialization if dmu is harvested */
+	if (!amdgpu_device_has_display_hardware(psp->adev))
+		return 0;
+
 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
 	    !psp->dtm_context.context.bin_desc.start_addr) {
 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
@@ -1929,6 +1940,9 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
+	if (!psp->dtm_context.context.initialized)
+		return 0;
+
 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
 }
@@ -2063,6 +2077,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
+	/* bypass securedisplay initialization if dmu is harvested */
+	if (!amdgpu_device_has_display_hardware(psp->adev))
+		return 0;
+
 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
 		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");

@@ -864,6 +864,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 				 gtt->ttm.dma_address, flags);
 	}
+	gtt->bound = true;
 }
 
 /*

@@ -60,6 +60,7 @@
 #define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"
 #define FIRMWARE_VCN4_0_5	"amdgpu/vcn_4_0_5.bin"
 #define FIRMWARE_VCN4_0_6	"amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN4_0_6_1	"amdgpu/vcn_4_0_6_1.bin"
 #define FIRMWARE_VCN5_0_0	"amdgpu/vcn_5_0_0.bin"
 
 MODULE_FIRMWARE(FIRMWARE_RAVEN);
@@ -85,6 +86,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
 MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
 
 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -93,14 +95,22 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
 {
 	char ucode_prefix[30];
 	char fw_name[40];
-	int r;
+	int r, i;
 
-	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-	r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
-	if (r)
-		amdgpu_ucode_release(&adev->vcn.fw);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) &&
+		    i == 1) {
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
+		}
+		r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
+		if (r) {
+			amdgpu_ucode_release(&adev->vcn.fw[i]);
+			return r;
+		}
+	}
 
 	return r;
 }
@@ -141,7 +151,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		}
 	}
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
 	/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -256,9 +266,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+
+		amdgpu_ucode_release(&adev->vcn.fw[j]);
 	}
 
-	amdgpu_ucode_release(&adev->vcn.fw);
 	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
 	mutex_destroy(&adev->vcn.vcn_pg_lock);
@@ -354,11 +365,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 		const struct common_firmware_header *hdr;
 		unsigned int offset;
 
-		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+		hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
 		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
-				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+				memcpy_toio(adev->vcn.inst[i].cpu_addr,
+					    adev->vcn.fw[i]->data + offset,
 					    le32_to_cpu(hdr->ucode_size_bytes));
 				drm_dev_exit(idx);
 			}
@@ -1043,11 +1055,11 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		const struct common_firmware_header *hdr;
 
-		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 
+			hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
 			/* currently only support 2 FW instances */
 			if (i >= 2) {
 				dev_info(adev->dev, "More then 2 VCN FW instances!\n");
@@ -1055,7 +1067,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
 			}
 			idx = AMDGPU_UCODE_ID_VCN + i;
 			adev->firmware.ucode[idx].ucode_id = idx;
-			adev->firmware.ucode[idx].fw = adev->vcn.fw;
+			adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
 			adev->firmware.fw_size +=
 				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

@@ -306,7 +306,7 @@ struct amdgpu_vcn_ras {
 struct amdgpu_vcn {
 	unsigned		fw_version;
 	struct delayed_work	idle_work;
-	const struct firmware	*fw;	/* VCN firmware */
+	const struct firmware	*fw[AMDGPU_MAX_VCN_INSTANCES];	/* VCN firmware */
 	unsigned		num_enc_rings;
 	enum amd_powergating_state cur_state;
 	bool			indirect_sram;
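The VCN 4.0.6 change above splits firmware per instance: adev->vcn.fw becomes an array, and instance 1 on VCN 4.0.6 loads the dedicated vcn_4_0_6_1.bin added by FIRMWARE_VCN4_0_6_1. A hedged, self-contained sketch of the name-selection rule (userspace stand-in; in the driver the prefix comes from amdgpu_ucode_ip_version_decode() at runtime):

#include <stdio.h>

/* Stand-in for the rule in amdgpu_vcn_early_init(): every VCN instance
 * loads "<prefix>.bin" except instance 1 on VCN 4.0.6, which loads the
 * dedicated "<prefix>_1.bin". */
static void vcn_fw_name(int is_vcn_4_0_6, int inst, char *buf, size_t len)
{
	const char *prefix = "vcn_4_0_6";	/* hypothetical; decoded at runtime */

	if (is_vcn_4_0_6 && inst == 1)
		snprintf(buf, len, "amdgpu/%s_%d.bin", prefix, inst);
	else
		snprintf(buf, len, "amdgpu/%s.bin", prefix);
}

int main(void)
{
	char name[40];
	int i;

	for (i = 0; i < 2; i++) {
		vcn_fw_name(1, i, name, sizeof(name));
		printf("VCN instance %d -> %s\n", i, name);
	}
	return 0;
}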

@@ -575,9 +575,6 @@ static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
 {
 	unsigned int ret;
 
-	if (ring->adev->vpe.collaborate_mode)
-		return ~0;
-
 	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
 	amdgpu_ring_write(ring, lower_32_bits(addr));
 	amdgpu_ring_write(ring, upper_32_bits(addr));

@@ -3657,6 +3657,9 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
 
 static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(10, 1, 10):
 		soc15_program_register_sequence(adev,
@@ -4982,7 +4985,8 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
 	u32 tmp;
 	int i;
 
-	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+	if (!amdgpu_sriov_vf(adev))
+		WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
 
 	gfx_v10_0_setup_rb(adev);
 	gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
@@ -7163,7 +7167,7 @@ static int gfx_v10_0_hw_init(void *handle)
 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
 		gfx_v10_3_program_pbb_mode(adev);
 
-	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0) && !amdgpu_sriov_vf(adev))
 		gfx_v10_3_set_power_brake_sequence(adev);
 
 	return r;

@@ -155,6 +155,9 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
 {
 	uint64_t value;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* Program the AGP BAR */
 	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
 	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);

@@ -418,6 +418,12 @@ static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
 	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
 	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+	 * can be detected.
+	 */
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 out:
 	return (wptr & ih->ptr_mask);
 }

@@ -99,16 +99,15 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
 	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
 	case IP_VERSION(3, 3, 0):
 	case IP_VERSION(3, 3, 1):
-		mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
+		mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
+			    mmhub_client_ids_v3_3[cid][rw] :
+			    cid == 0x140 ? "UMSCH" : NULL;
 		break;
 	default:
 		mmhub_cid = NULL;
 		break;
 	}
-
-	if (!mmhub_cid && cid == 0x140)
-		mmhub_cid = "UMSCH";
-
 	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
 		mmhub_cid ? mmhub_cid : "unknown", cid);
 	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
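The mmhub fix above is a bounds check: cid comes from a hardware fault-status register, and IDs beyond the end of mmhub_client_ids_v3_3[] (such as 0x140, UMSCH) used to index past the table before the special case was consulted. The same guard pattern as a self-contained sketch, with a hypothetical table:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for mmhub_client_ids_v3_3[][]; the real table is
 * indexed by [client id][read/write]. */
static const char *client_ids[][2] = {
	{ "VMC",   "VMC"   },
	{ "UTCL2", "UTCL2" },
};

static const char *client_name(unsigned int cid, unsigned int rw)
{
	/* Guard the lookup before indexing; out-of-range IDs map to a
	 * known special case or to NULL (printed as "unknown"). */
	return cid < ARRAY_SIZE(client_ids) ? client_ids[cid][rw] :
	       cid == 0x140 ? "UMSCH" : NULL;
}

int main(void)
{
	unsigned int ids[] = { 1, 0x140, 0x200 };

	for (unsigned int i = 0; i < ARRAY_SIZE(ids); i++) {
		const char *n = client_name(ids[i], 0);

		printf("client 0x%x: %s\n", ids[i], n ? n : "unknown");
	}
	return 0;
}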

@@ -431,16 +431,11 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
 	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	u32 doorbell_offset, doorbell;
 	u32 rb_cntl, ib_cntl;
-	int i, unset = 0;
+	int i;
 
 	for_each_inst(i, inst_mask) {
 		sdma[i] = &adev->sdma.instance[i].ring;
 
-		if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			unset = 1;
-		}
-
 		rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
@@ -487,20 +482,10 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
 static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
 				       uint32_t inst_mask)
 {
-	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	u32 rb_cntl, ib_cntl;
 	int i;
-	bool unset = false;
 
 	for_each_inst(i, inst_mask) {
-		sdma[i] = &adev->sdma.instance[i].page;
-
-		if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-			(!unset)) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			unset = true;
-		}
-
 		rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
 					RB_ENABLE, 0);
@@ -950,13 +935,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
 			r = amdgpu_ring_test_helper(page);
 			if (r)
 				return r;
-
-			if (adev->mman.buffer_funcs_ring == page)
-				amdgpu_ttm_set_buffer_funcs_status(adev, true);
 		}
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return r;

@@ -304,7 +304,7 @@ static int vcn_v1_0_resume(void *handle)
  */
 static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 	uint32_t offset;
 
 	/* cache window 0: fw */
@@ -371,7 +371,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
 static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 	uint32_t offset;
 
 	/* cache window 0: fw */

@@ -330,7 +330,7 @@ static int vcn_v2_0_resume(void *handle)
  */
 static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 	uint32_t offset;
 
 	if (amdgpu_sriov_vf(adev))
@@ -386,7 +386,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 	uint32_t offset;
 
 	/* cache window 0: fw */
@@ -1878,7 +1878,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
 	init_table += header->vcn_table_offset;
 
-	size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 
 	MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
 		SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),

@@ -414,13 +414,15 @@ static int vcn_v2_5_resume(void *handle)
  */
 static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size;
 	uint32_t offset;
 	int i;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
+
+		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 		/* cache window 0: fw */
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -469,7 +471,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
 	uint32_t offset;
 
 	/* cache window 0: fw */
@@ -1240,7 +1242,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
 			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 		/* mc resume*/
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V1_0_INSERT_DIRECT_WT(

@@ -449,7 +449,7 @@ static int vcn_v3_0_resume(void *handle)
  */
 static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
 	uint32_t offset;
 
 	/* cache window 0: fw */
@@ -499,7 +499,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
 static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
 	uint32_t offset;
 
 	/* cache window 0: fw */
@@ -1332,7 +1332,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
 				mmUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,

@@ -382,7 +382,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	/* cache window 0: fw */
@@ -442,7 +442,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 {
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	/* cache window 0: fw */
@@ -1289,7 +1289,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
 				regUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,

@@ -332,7 +332,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
 	uint32_t offset, size, vcn_inst;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	vcn_inst = GET_INST(VCN, inst_idx);
@@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	/* cache window 0: fw */
@@ -894,7 +894,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
 		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
 
-		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,

@@ -45,7 +45,7 @@
 #define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX
 
 #define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
-#define VCN1_VID_SOC_ADDRESS_3_0	0x48300
+#define VCN1_VID_SOC_ADDRESS_3_0	(0x48300 + 0x38000)
 
 #define VCN_HARVEST_MMSCH		0
@@ -329,7 +329,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	/* cache window 0: fw */
@@ -390,7 +390,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	/* cache window 0: fw */
@@ -486,7 +486,8 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 
 	/* VCN global tiling registers */
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
-		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+		adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 /**
@@ -911,7 +912,6 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
 		VCN, inst_idx, regUVD_MASTINT_EN),
 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
 
 	if (indirect)
-
 		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

@@ -290,7 +290,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	/* cache window 0: fw */
@@ -351,7 +351,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
 
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	/* cache window 0: fw */

@@ -1767,6 +1767,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
 		adev->dm.dc->debug.force_subvp_mclk_switch = true;
 
+	if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+		adev->dm.dc->debug.using_dml2 = true;
+
 	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
 
 	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@@ -11271,18 +11274,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	if (!adev->dm.freesync_module)
 		goto update;
 
-	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
-		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
+	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
 		bool edid_check_required = false;
 
-		if (edid) {
-			edid_check_required = is_dp_capable_without_timing_msa(
-						adev->dm.dc,
-						amdgpu_dm_connector);
+		if (is_dp_capable_without_timing_msa(adev->dm.dc,
+						     amdgpu_dm_connector)) {
+			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+				freesync_capable = true;
+				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+			} else {
+				edid_check_required = edid->version > 1 ||
+						      (edid->version == 1 &&
+						       edid->revision > 1);
+			}
 		}
 
-		if (edid_check_required == true && (edid->version > 1 ||
-			(edid->version == 1 && edid->revision > 1))) {
+		if (edid_check_required) {
 			for (i = 0; i < 4; i++) {
 
 				timing	= &edid->detailed_timings[i];

@@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
 	/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
 	case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
 	case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+	case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
+	case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
 		DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
 		edid_caps->panel_patch.remove_sink_ext_caps = true;
 		break;
@@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 	edid_caps->edid_hdmi = connector->display_info.is_hdmi;
 
+	apply_edid_quirks(edid_buf, edid_caps);
+
 	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
 	if (sad_count <= 0)
 		return result;
@@ -146,8 +150,6 @@
 	else
 		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
 
-	apply_edid_quirks(edid_buf, edid_caps);
-
 	kfree(sads);
 	kfree(sadb);

@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
 	if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
 		clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
 
+	/* DPPCLK */
+	dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+			&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+			&num_entries_per_clk->num_dppclk_levels);
+	num_levels = num_entries_per_clk->num_dppclk_levels;
+	clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+	//HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+	if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+		clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
 	if (num_entries_per_clk->num_dcfclk_levels &&
 	    num_entries_per_clk->num_dtbclk_levels &&
 	    num_entries_per_clk->num_dispclk_levels)
@@ -240,6 +250,10 @@
 			= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
 	}
 
+	for (i = 0; i < num_levels; i++)
+		if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+			clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
 	/* Get UCLK, update bounding box */
 	clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);

@@ -1302,6 +1302,54 @@ static void disable_vbios_mode_if_required(
 	}
 }
 
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+				    struct dc_state *context)
+{
+	struct pipe_ctx *opp_head;
+	struct dce_hwseq *hws = dc->hwseq;
+	int i;
+
+	if (!hws->funcs.wait_for_blank_complete)
+		return;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		opp_head = &context->res_ctx.pipe_ctx[i];
+
+		if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+		    dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+			continue;
+
+		hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+	}
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+	struct pipe_ctx *otg_master;
+	struct timing_generator *tg;
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		otg_master = &context->res_ctx.pipe_ctx[i];
+		if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+		    dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+			continue;
+		tg = otg_master->stream_res.tg;
+		if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+			tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+	}
+
+	/* ODM update may require to reprogram blank pattern for each OPP */
+	wait_for_blank_complete(dc, context);
+}
+
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
 	int i;
@@ -1993,6 +2041,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 			context->stream_count == 0) {
 		/* Must wait for no flips to be pending before doing optimize bw */
 		wait_for_no_pipes_pending(dc, context);
+		/*
+		 * optimized dispclk depends on ODM setup. Need to wait for ODM
+		 * update pending complete before optimizing bandwidth.
+		 */
+		wait_for_odm_update_pending_complete(dc, context);
 		/* pplib is notified if disp_num changed */
 		dc->hwss.optimize_bandwidth(dc, context);
 		/* Need to do otg sync again as otg could be out of sync due to otg
@@ -3270,6 +3323,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
 	if (stream->link->replay_settings.config.replay_supported)
 		return true;
 
+	if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+		return true;
+
 	return false;
 }
@@ -3493,7 +3549,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
 		top_pipe_to_program->stream->update_flags.raw = 0;
 }
 
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
 {
 /*
  * This function calls HWSS to wait for any potentially double buffered
@@ -3531,6 +3587,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
 			}
 		}
 	}
+	wait_for_odm_update_pending_complete(dc, dc_context);
 }
 
 static void commit_planes_for_stream(struct dc *dc,
@@ -4844,22 +4901,16 @@ void dc_exit_ips_for_hw_access(struct dc *dc)
 
 bool dc_dmub_is_ips_idle_state(struct dc *dc)
 {
-	uint32_t idle_state = 0;
-
 	if (dc->debug.disable_idle_power_optimizations)
 		return false;
 
 	if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
 		return false;
 
-	if (dc->hwss.get_idle_state)
-		idle_state = dc->hwss.get_idle_state(dc);
-
-	if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
-	    !(idle_state & DMUB_IPS2_ALLOW_MASK))
-		return true;
+	if (!dc->ctx->dmub_srv)
+		return false;
 
-	return false;
+	return dc->ctx->dmub_srv->idle_allowed;
 }
 
 /* set min and max memory clock to lowest and highest DPM level, respectively */

@@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref)
 
 void dc_state_release(struct dc_state *state)
 {
-	kref_put(&state->refcount, dc_state_free);
+	if (state != NULL)
+		kref_put(&state->refcount, dc_state_free);
 }
 
 /*
 * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
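dc_state_release() above becomes NULL-tolerant, in the spirit of kfree(NULL): error paths can then release unconditionally without first checking whether state creation succeeded. A self-contained sketch of the idea with a toy refcount (in the kernel, kref_put() invokes the release callback when the count drops to zero):

#include <stdlib.h>

/* Toy stand-in for a kref-counted dc_state. */
struct state {
	int refcount;
};

static void state_release(struct state *s)
{
	/* NULL-tolerant, mirroring the dc_state_release() fix: callers on
	 * error paths may legitimately hold a NULL pointer. */
	if (s == NULL)
		return;
	if (--s->refcount == 0)
		free(s);
}

int main(void)
{
	struct state *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;
	s->refcount = 1;

	state_release(NULL);	/* safe no-op after the fix */
	state_release(s);	/* drops the last reference and frees */
	return 0;
}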

@@ -1085,9 +1085,9 @@ struct replay_settings {
 	/* SMU optimization is enabled */
 	bool replay_smu_opt_enable;
 	/* Current Coasting vtotal */
-	uint16_t coasting_vtotal;
+	uint32_t coasting_vtotal;
 	/* Coasting vtotal table */
-	uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+	uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
 	/* Maximum link off frame count */
 	enum replay_link_off_frame_count_level link_off_frame_count_level;
 	/* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */

@@ -384,6 +384,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
 		.opp_set_disp_pattern_generator = NULL,
 		.opp_program_dpg_dimensions = NULL,
 		.dpg_is_blanked = NULL,
+		.dpg_is_pending = NULL,
 		.opp_destroy = opp1_destroy
 };

@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
 		(double_buffer_pending == 0);
 }
 
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+	uint32_t double_buffer_pending;
+	uint32_t dpg_en;
+
+	REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+	REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+	return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
 void opp2_program_left_edge_extra_pixel (
 		struct output_pixel_processor *opp,
 		bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
 		.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
 		.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
 		.dpg_is_blanked = opp2_dpg_is_blanked,
+		.dpg_is_pending = opp2_dpg_is_pending,
 		.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
 		.opp_destroy = opp1_destroy,
 		.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,

@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
 
 bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
 
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
 void opp2_dpg_set_blank_color(
 	struct output_pixel_processor *opp,
 	const struct tg_color *color);

@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
 		.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
 		.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
 		.dpg_is_blanked = opp2_dpg_is_blanked,
+		.dpg_is_pending = opp2_dpg_is_pending,
 		.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
 		.opp_destroy = opp1_destroy,
 		.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,

@@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
 		pipe_cnt++;
 	}
 }
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+	if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+		context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}

@@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
 		 * - Not TMZ surface
 		 */
 		if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+		    !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
 		    (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
 		    dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
 		    (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&

@@ -824,13 +824,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state
 
 static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
 {
+	dml_uint_t width, height;
+
+	if (in->timing.h_addressable > 3840)
+		width = 3840;
+	else
+		width = in->timing.h_addressable; // 4K max
+	if (in->timing.v_addressable > 2160)
+		height = 2160;
+	else
+		height = in->timing.v_addressable; // 4K max
+
 	out->CursorBPP[location] = dml_cur_32bit;
 	out->CursorWidth[location] = 256;
 
 	out->GPUVMMinPageSizeKBytes[location] = 256;
 
-	out->ViewportWidth[location] = in->timing.h_addressable;
-	out->ViewportHeight[location] = in->timing.v_addressable;
+	out->ViewportWidth[location] = width;
+	out->ViewportHeight[location] = height;
 	out->ViewportStationary[location] = false;
 
 	out->ViewportWidthChroma[location] = 0;
 	out->ViewportHeightChroma[location] = 0;
@@ -849,7 +861,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
 	out->HTapsChroma[location] = 0;
 	out->VTapsChroma[location] = 0;
 	out->SourceScan[location] = dml_rotation_0;
-	out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+	out->ScalerRecoutWidth[location] = width;
 
 	out->LBBitPerPixel[location] = 57;

@@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
 	return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
 }
 
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
 {
-	// Allocate Mode Lib Ctx
-	*dml2 = dml2_allocate_memory();
-
-	if (!(*dml2))
-		return false;
-
 	// Store config options
 	(*dml2)->config = *config;
 
@@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
 
 	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
 	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+	// Allocate Mode Lib Ctx
+	*dml2 = dml2_allocate_memory();
+
+	if (!(*dml2))
+		return false;
+
+	dml2_init(in_dc, config, dml2);
 
-	/*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
-	//dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
 	return true;
 }
@@ -779,3 +783,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,
 
 	return true;
 }
+
+void dml2_reinit(const struct dc *in_dc,
+		 const struct dml2_configuration_options *config,
+		 struct dml2_context **dml2)
+{
+	dml2_init(in_dc, config, dml2);
+}

@@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2,
 	struct dml2_context *src_dml2);
 bool dml2_create_copy(struct dml2_context **dst_dml2,
 	struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+		 const struct dml2_configuration_options *config,
+		 struct dml2_context **dml2);
 
 /*
 * dml2_validate - Determines if a display configuration is supported or not.

View File

@@ -1498,6 +1498,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
 		return;
 	}
 
+	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+	    resource_is_odm_topology_changed(new_pipe, old_pipe))
+		/* Detect odm changes */
+		new_pipe->update_flags.bits.odm = 1;
+
 	/* Exit on unchanged, unused pipe */
 	if (!old_pipe->plane_state && !new_pipe->plane_state)
 		return;
@@ -1551,10 +1556,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
 
 	/* Detect top pipe only changes */
 	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
-		/* Detect odm changes */
-		if (resource_is_odm_topology_changed(new_pipe, old_pipe))
-			new_pipe->update_flags.bits.odm = 1;
-
 		/* Detect global sync changes */
 		if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
 				|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1999,19 +2000,20 @@ void dcn20_program_front_end_for_ctx(
 	DC_LOGGER_INIT(dc->ctx->logger);
 	unsigned int prev_hubp_count = 0;
 	unsigned int hubp_count = 0;
+	struct pipe_ctx *pipe;
 
 	if (resource_is_pipe_topology_changed(dc->current_state, context))
 		resource_log_pipe_topology_update(dc, context);
 
 	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
-			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+			pipe = &context->res_ctx.pipe_ctx[i];
 
-			if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
-				ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+			if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+				ASSERT(!pipe->plane_state->triplebuffer_flips);
 				/*turn off triple buffer for full update*/
 				dc->hwss.program_triplebuffer(
-					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+					dc, pipe, pipe->plane_state->triplebuffer_flips);
 			}
 		}
 	}
@@ -2085,12 +2087,22 @@ void dcn20_program_front_end_for_ctx(
 			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
 	}
 
+	/* update ODM for blanked OTG master pipes */
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+		    !resource_is_pipe_type(pipe, DPP_PIPE) &&
+		    pipe->update_flags.bits.odm &&
+		    hws->funcs.update_odm)
+			hws->funcs.update_odm(dc, context, pipe);
+	}
+
 	/*
 	 * Program all updated pipes, order matters for mpcc setup. Start with
 	 * top pipe and program all pipes that follow in order
 	 */
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		pipe = &context->res_ctx.pipe_ctx[i];
 
 		if (pipe->plane_state && !pipe->top_pipe) {
 			while (pipe) {
@@ -2129,17 +2141,6 @@ void dcn20_program_front_end_for_ctx(
 				context->stream_status[0].plane_count > 1) {
 				pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
 			}
-
-			/* when dynamic ODM is active, pipes must be reconfigured when all planes are
-			 * disabled, as some transitions will leave software and hardware state
-			 * mismatched.
-			 */
-			if (dc->debug.enable_single_display_2to1_odm_policy &&
-					pipe->stream &&
-					pipe->update_flags.bits.disable &&
-					!pipe->prev_odm_pipe &&
-					hws->funcs.update_odm)
-				hws->funcs.update_odm(dc, context, pipe);
 		}
 	}
@@ -2451,7 +2452,7 @@ bool dcn20_wait_for_blank_complete(
 	int counter;
 
 	for (counter = 0; counter < 1000; counter++) {
-		if (opp->funcs->dpg_is_blanked(opp))
+		if (!opp->funcs->dpg_is_pending(opp))
 			break;
 
 		udelay(100);
@@ -2462,7 +2463,7 @@ bool dcn20_wait_for_blank_complete(
 		return false;
 	}
 
-	return true;
+	return opp->funcs->dpg_is_blanked(opp);
 }
 
 bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
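
Reassembled from the two hunks above, the reworked wait is a poll-then-verify: spin until the DPG double-buffered update is no longer pending, then report whether the OPP actually ended up blanked. A sketch of the resulting flow, with the bounds taken from the code above (the helper name is illustrative):

	/* Poll up to 1000 times, 100 us apart (~100 ms total), for the
	 * pending DPG update to land, then confirm the blank state latched. */
	static bool wait_for_dpg_blank(struct output_pixel_processor *opp)
	{
		int counter;

		for (counter = 0; counter < 1000; counter++) {
			if (!opp->funcs->dpg_is_pending(opp))
				break;	/* double-buffered update has landed */
			udelay(100);
		}

		if (counter == 1000)
			return false;	/* timed out with the update still pending */

		return opp->funcs->dpg_is_blanked(opp);
	}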

@@ -812,10 +812,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
 	if (pipe_ctx == NULL)
 		return;
 
-	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
 		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
 				pipe_ctx->stream_res.stream_enc,
 				enable);
+
+		/* Wait for two frame to make sure AV mute is sent out */
+		if (enable) {
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+		}
+	}
 }
 
 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
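
Each VACTIVE-to-VBLANK transition marks one full frame leaving the timing generator, so the five chained waits above guarantee two complete frames carry the AV mute packet before the caller proceeds. The same idea as a hypothetical helper (wait_n_frames is illustrative, not existing DC API):

	/* Hypothetical: fence on n scanned-out frames using TG state waits. */
	static void wait_n_frames(struct timing_generator *tg, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
			tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
		}
		/* return to active video so the caller resumes on a stable frame */
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
	}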

@@ -1156,6 +1156,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
 			dsc->funcs->dsc_disconnect(dsc);
 		}
 	}
+
+	if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+		/*
+		 * blank pattern is generated by OPP, reprogram blank pattern
+		 * due to OPP count change
+		 */
+		dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
 }
 
 unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
@@ -1778,3 +1785,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
 		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
 	}
 }
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+		struct dc_state *context, bool lock)
+{
+	unsigned int i;
+	struct pipe_ctx *pipe;
+	struct timing_generator *tg;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+		tg = pipe->stream_res.tg;
+
+		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+				!tg->funcs->is_tg_enabled(tg) ||
+				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+			continue;
+
+		if (lock)
+			dc->hwss.pipe_control_lock(dc, pipe, true);
+		else
+			dc->hwss.pipe_control_lock(dc, pipe, false);
+	}
+}
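
With this hook, dcn32 locks only enabled OTG masters and skips SubVP phantom pipes, instead of the blanket lock taken by dcn10_lock_all_pipes. A minimal sketch of how the hw sequencer drives it (the call sites are assumed from the hook's role; names illustrative):

	/* Bracket interdependent pipe programming with the per-OTG lock. */
	dc->hwss.interdependent_update_lock(dc, context, true);
	/* ... program pipe updates that must land atomically ... */
	dc->hwss.interdependent_update_lock(dc, context, false);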

@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
 void dcn32_prepare_bandwidth(struct dc *dc,
 		struct dc_state *context);
 
+void dcn32_interdependent_update_lock(struct dc *dc,
+		struct dc_state *context, bool lock);
+
 #endif /* __DC_HWSS_DCN32_H__ */

@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
 	.disable_plane = dcn20_disable_plane,
 	.disable_pixel_data = dcn20_disable_pixel_data,
 	.pipe_control_lock = dcn20_pipe_control_lock,
-	.interdependent_update_lock = dcn10_lock_all_pipes,
+	.interdependent_update_lock = dcn32_interdependent_update_lock,
 	.cursor_lock = dcn10_cursor_lock,
 	.prepare_bandwidth = dcn32_prepare_bandwidth,
 	.optimize_bandwidth = dcn20_optimize_bandwidth,

@@ -337,6 +337,9 @@ struct opp_funcs {
 	bool (*dpg_is_blanked)(
 			struct output_pixel_processor *opp);
 
+	bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
 	void (*opp_dpg_set_blank_color)(
 			struct output_pixel_processor *opp,
 			const struct tg_color *color);

@@ -331,6 +331,7 @@ struct timing_generator_funcs {
 	void (*init_odm)(struct timing_generator *tg);
 	void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+	void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
 };
 
 #endif

@@ -285,12 +285,12 @@ struct link_service {
 			enum replay_FW_Message_type msg,
 			union dmub_replay_cmd_set *cmd_data);
 	bool (*edp_set_coasting_vtotal)(
-			struct dc_link *link, uint16_t coasting_vtotal);
+			struct dc_link *link, uint32_t coasting_vtotal);
 	bool (*edp_replay_residency)(const struct dc_link *link,
 			unsigned int *residency, const bool is_start,
 			const bool is_alpm);
 	bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
-			const unsigned int *power_opts, uint16_t coasting_vtotal);
+			const unsigned int *power_opts, uint32_t coasting_vtotal);
 	bool (*edp_wait_for_t12)(struct dc_link *link);
 	bool (*edp_is_ilr_optimization_required)(struct dc_link *link,

@@ -1034,7 +1034,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
 	return true;
 }
 
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
 {
 	struct dc *dc = link->ctx->dc;
 	struct dmub_replay *replay = dc->res_pool->replay;
@@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link,
 }
 
 bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
-	const unsigned int *power_opts, uint16_t coasting_vtotal)
+	const unsigned int *power_opts, uint32_t coasting_vtotal)
 {
 	struct dc *dc = link->ctx->dc;
 	struct dmub_replay *replay = dc->res_pool->replay;

@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
 bool edp_send_replay_cmd(struct dc_link *link,
 	enum replay_FW_Message_type msg,
 	union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
 bool edp_replay_residency(const struct dc_link *link,
 	unsigned int *residency, const bool is_start, const bool is_alpm);
 bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
 bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
-	const unsigned int *power_opts, uint16_t coasting_vtotal);
+	const unsigned int *power_opts, uint32_t coasting_vtotal);
 bool edp_wait_for_t12(struct dc_link *link);
 bool edp_is_ilr_optimization_required(struct dc_link *link,
 	struct dc_crtc_timing *crtc_timing);

@@ -557,7 +557,8 @@ struct dcn_optc_registers {
 	type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
 	type OTG_CRC_DATA_FORMAT;\
 	type OTG_V_TOTAL_LAST_USED_BY_DRR;\
-	type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+	type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+	type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
 
 #define TG_REG_FIELD_LIST_DCN3_2(type) \
 	type OTG_H_TIMING_DIV_MODE_MANUAL;

@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments)
 	}
 }
 
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+	REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
 void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -345,6 +352,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
 		.set_odm_bypass = optc32_set_odm_bypass,
 		.set_odm_combine = optc32_set_odm_combine,
 		.get_odm_combine_segments = optc32_get_odm_combine_segments,
+		.wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
 		.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
 		.get_optc_source = optc2_get_optc_source,
 		.set_out_mux = optc3_set_out_mux,
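
If the DC register helpers are read correctly, REG_WAIT's trailing arguments are the delay between polls in microseconds (2) and the maximum number of tries (50000), so the new wait gives the ODM double-buffer pending bit roughly 100 ms to clear. Open-coded, the same loop would look like this sketch (the pending-bit read is shown symbolically, not as a real accessor):

	/* Sketch of what the REG_WAIT above does in spirit. */
	for (tries = 0; tries < 50000; tries++) {
		if (pending_bit == 0)	/* OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING */
			break;
		udelay(2);
	}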

@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
 void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
 void optc32_set_odm_bypass(struct timing_generator *optc,
 		const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
 
 #endif /* __DC_OPTC_DCN32_H__ */

@@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate)
 		dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
 
 	dcn32_override_min_req_memclk(dc, context);
+	dcn32_override_min_req_dcfclk(dc, context);
 
 	BW_VAL_TRACE_END_WATERMARKS();
@@ -1930,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 {
 	DC_FP_START();
 	dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+	if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+		dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
 	DC_FP_END();
 }

@@ -42,6 +42,7 @@
 #define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
 #define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define MIN_SUBVP_DCFCLK_KHZ 400000
 
 #define TO_DCN32_RES_POOL(pool)\
 	container_of(pool, struct dcn32_resource_pool, base)
@@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
 void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
 
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+
 /* definitions for run time init of reg offsets */
 
 /* CLK SRC */

@@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 {
 	DC_FP_START();
 	dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+	if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+		dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
 	DC_FP_END();
 }

@@ -3238,6 +3238,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
 	 * Currently the support is only for 0 or 1
 	 */
 	uint8_t panel_inst;
+	/**
+	 * 16-bit value dicated by driver that indicates the coasting vtotal high byte part.
+	 */
+	uint16_t coasting_vtotal_high;
+	/**
+	 * Explicit padding to 4 byte boundary.
+	 */
+	uint8_t pad[2];
 };
 
 /**
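
The driver-side coasting vtotal is now 32 bits (see the uint16_t-to-uint32_t hunks above), but the existing DMUB field stays 16 bits for firmware ABI compatibility, so the new coasting_vtotal_high carries the upper half and pad[2] keeps the struct 4-byte aligned. A hedged sketch of how a sender would split the value (the low field name coasting_vtotal is assumed from this struct's context):

	/* Split a 32-bit coasting vtotal across the two 16-bit DMUB fields. */
	data->coasting_vtotal      = coasting_vtotal & 0xFFFF;		/* low 16 bits */
	data->coasting_vtotal_high = (coasting_vtotal >> 16) & 0xFFFF;	/* high 16 bits */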

@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
+	if (!display)
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
 	hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
 
 	if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)

@@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
 
 void set_replay_coasting_vtotal(struct dc_link *link,
 	enum replay_coasting_vtotal_type type,
-	uint16_t vtotal)
+	uint32_t vtotal)
 {
 	link->replay_settings.coasting_vtotal_table[type] = vtotal;
 }

@@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
 void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
 void set_replay_coasting_vtotal(struct dc_link *link,
 	enum replay_coasting_vtotal_type type,
-	uint16_t vtotal);
+	uint32_t vtotal);
 void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
 void calculate_replay_link_off_frame_count(struct dc_link *link,
 	uint16_t vtotal, uint16_t htotal);

@@ -1283,10 +1283,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 					uint32_t *max_power_limit,
 					uint32_t *min_power_limit)
 {
-	struct smu_11_0_powerplay_table *powerplay_table =
-		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
-	uint32_t power_limit, od_percent_upper, od_percent_lower;
+	uint32_t power_limit;
 
 	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
 		/* the last hope to figure out the ppt limit */
@@ -1302,26 +1300,10 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 		*current_power_limit = power_limit;
 	if (default_power_limit)
 		*default_power_limit = power_limit;
-
-	if (smu->od_enabled)
-		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-	else
-		od_percent_upper = 0;
-
-	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-
-	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
-					od_percent_upper, od_percent_lower, power_limit);
-
-	if (max_power_limit) {
-		*max_power_limit = power_limit * (100 + od_percent_upper);
-		*max_power_limit /= 100;
-	}
-
-	if (min_power_limit) {
-		*min_power_limit = power_limit * (100 - od_percent_lower);
-		*min_power_limit /= 100;
-	}
+	if (max_power_limit)
+		*max_power_limit = power_limit;
+	if (min_power_limit)
+		*min_power_limit = power_limit;
 
 	return 0;
 }
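
For reference, the deleted code scaled the reported bounds by the overdrive percentages: with power_limit = 280 and od_percent_upper = 20, max_power_limit = 280 * (100 + 20) / 100 = 336, and the minimum shrank symmetrically via (100 - od_percent_lower). Arcturus exposes no OD power-limit knob, so both bounds now simply collapse to the current limit, as the replacement code above shows; the remaining SMU targets below keep the scaling but gate it on a valid powerplay table and OD capability.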

@@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu,
 		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
 	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
-	uint32_t power_limit, od_percent_upper, od_percent_lower;
+	uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
 
 	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
 		/* the last hope to figure out the ppt limit */
@@ -2356,13 +2356,16 @@ static int navi10_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (smu->od_enabled &&
-	    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
-		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-	else
-		od_percent_upper = 0;
-
-	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+	if (powerplay_table) {
+		if (smu->od_enabled &&
+		    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+			od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+		} else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+			od_percent_upper = 0;
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+		}
+	}
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);

@@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
 	return throttler_status;
 }
 
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+						   enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+	return od_table->cap[cap];
+}
+
 static int sienna_cichlid_get_power_limit(struct smu_context *smu,
 					  uint32_t *current_power_limit,
 					  uint32_t *default_power_limit,
@@ -625,7 +631,8 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
 {
 	struct smu_11_0_7_powerplay_table *powerplay_table =
 		(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
-	uint32_t power_limit, od_percent_upper, od_percent_lower;
+	struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+	uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
 	uint16_t *table_member;
 
 	GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
@@ -640,12 +647,16 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (smu->od_enabled)
-		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
-	else
-		od_percent_upper = 0;
-
-	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+	if (powerplay_table) {
+		if (smu->od_enabled &&
+		    sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
+			od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+		} else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) {
+			od_percent_upper = 0;
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+		}
+	}
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);
@@ -1250,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
 	return dpm_desc->SnapToDiscrete == 0;
 }
 
-static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
-						   enum SMU_11_0_7_ODFEATURE_CAP cap)
-{
-	return od_table->cap[cap];
-}
-
 static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
 						enum SMU_11_0_7_ODSETTING_ID setting,
 						uint32_t *min, uint32_t *max)

@@ -2356,7 +2356,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
 		(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
 	PPTable_t *pptable = table_context->driver_pptable;
 	SkuTable_t *skutable = &pptable->SkuTable;
-	uint32_t power_limit, od_percent_upper, od_percent_lower;
+	uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
 	uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
 	if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2369,12 +2369,16 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (smu->od_enabled)
-		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
-	else
-		od_percent_upper = 0;
-
-	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+	if (powerplay_table) {
+		if (smu->od_enabled &&
+		    smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+			od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+		} else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+			od_percent_upper = 0;
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+		}
+	}
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);

@@ -2320,7 +2320,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
 		(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
 	PPTable_t *pptable = table_context->driver_pptable;
 	SkuTable_t *skutable = &pptable->SkuTable;
-	uint32_t power_limit, od_percent_upper, od_percent_lower;
+	uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
 	uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
 	if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2333,12 +2333,16 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (smu->od_enabled)
-		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
-	else
-		od_percent_upper = 0;
-
-	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+	if (powerplay_table) {
+		if (smu->od_enabled &&
+		    (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) {
+			od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+		} else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+			od_percent_upper = 0;
+			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+		}
+	}
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);