Merge tag 'amd-drm-next-5.20-2022-07-14' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.20-2022-07-14:

amdgpu:
- DCN3.2 updates
- DC SubVP support
- DP MST fixes
- Audio fixes
- DC code cleanup
- SMU13 updates
- Adjust GART size on newer APUs for S/G display
- Soft reset for GFX 11
- Soft reset for SDMA 6
- Add gfxoff status query for vangogh
- Improve BO domain pinning
- Fix timestamps for cursor only commits
- MES fixes
- DCN 3.1.4 support
- Misc fixes
- Misc code cleanup

amdkfd:
- Simplify GPUVM validation
- Unified memory for CWSR save/restore area
- Fix possible list corruption on queue failure

radeon:
- Fix bogus power of two warning

UAPI:
- Unified memory for CWSR save/restore area for KFD
  Proposed userspace: https://lists.freedesktop.org/archives/amd-gfx/2022-June/080952.html

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220714214716.8203-1-alexander.deucher@amd.com
commit 60693e3a38
@@ -75,7 +75,7 @@ we have a dedicated glossary for Display Core at
     PSP
         Platform Security Processor

-    RCL
+    RLC
         RunList Controller

     SDMA
@@ -1253,9 +1253,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-                              struct amdgpu_job* job);
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-                              struct amdgpu_job *job);
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+                              struct amdgpu_job *job,
+                              struct amdgpu_reset_context *reset_context);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
@@ -129,7 +129,14 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
     struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                               kfd.reset_work);

-    amdgpu_device_gpu_recover(adev, NULL);
+    struct amdgpu_reset_context reset_context;
+    memset(&reset_context, 0, sizeof(reset_context));
+
+    reset_context.method = AMD_RESET_METHOD_NONE;
+    reset_context.reset_req_dev = adev;
+    clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+    amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }

 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
@@ -401,22 +401,8 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
         return ret;
     }

-    ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
-    if (ret) {
-        pr_err("failed to validate PD\n");
-        return ret;
-    }
-
     vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);

-    if (vm->use_cpu_for_update) {
-        ret = amdgpu_bo_kmap(pd, NULL);
-        if (ret) {
-            pr_err("failed to kmap PD, ret=%d\n", ret);
-            return ret;
-        }
-    }
-
     return 0;
 }

@@ -1555,16 +1541,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                     struct amdgpu_vm *vm)
 {
     struct amdkfd_process_info *process_info = vm->process_info;
-    struct amdgpu_bo *pd = vm->root.bo;

     if (!process_info)
         return;

-    /* Release eviction fence from PD */
-    amdgpu_bo_reserve(pd, false);
-    amdgpu_bo_fence(pd, NULL, false);
-    amdgpu_bo_unreserve(pd);
-
     /* Update process info */
     mutex_lock(&process_info->lock);
     process_info->n_vms--;
@@ -383,12 +383,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,

         value = RREG32_PCIE(*pos);
         r = put_user(value, (uint32_t *)buf);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            amdgpu_virt_disable_access_debugfs(adev);
-            return r;
-        }
+        if (r)
+            goto out;

         result += 4;
         buf += 4;
@@ -396,11 +392,12 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
         size -= 4;
     }

+    r = result;
+out:
     pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

     amdgpu_virt_disable_access_debugfs(adev);
-    return result;
+    return r;
 }

 /**
@@ -441,12 +438,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
         uint32_t value;

         r = get_user(value, (uint32_t *)buf);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            amdgpu_virt_disable_access_debugfs(adev);
-            return r;
-        }
+        if (r)
+            goto out;

         WREG32_PCIE(*pos, value);

@@ -456,11 +449,12 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
         size -= 4;
     }

+    r = result;
+out:
     pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

     amdgpu_virt_disable_access_debugfs(adev);
-    return result;
+    return r;
 }

 /**
@@ -502,12 +496,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,

         value = RREG32_DIDT(*pos >> 2);
         r = put_user(value, (uint32_t *)buf);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            amdgpu_virt_disable_access_debugfs(adev);
-            return r;
-        }
+        if (r)
+            goto out;

         result += 4;
         buf += 4;
@@ -515,11 +505,12 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
         size -= 4;
     }

+    r = result;
+out:
     pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

     amdgpu_virt_disable_access_debugfs(adev);
-    return result;
+    return r;
 }

 /**
@@ -560,12 +551,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
         uint32_t value;

         r = get_user(value, (uint32_t *)buf);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            amdgpu_virt_disable_access_debugfs(adev);
-            return r;
-        }
+        if (r)
+            goto out;

         WREG32_DIDT(*pos >> 2, value);

@@ -575,11 +562,12 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
         size -= 4;
     }

+    r = result;
+out:
     pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

     amdgpu_virt_disable_access_debugfs(adev);
-    return result;
+    return r;
 }

 /**
@@ -621,12 +609,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,

         value = RREG32_SMC(*pos);
         r = put_user(value, (uint32_t *)buf);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            amdgpu_virt_disable_access_debugfs(adev);
-            return r;
-        }
+        if (r)
+            goto out;

         result += 4;
         buf += 4;
@@ -634,11 +618,12 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
         size -= 4;
     }

+    r = result;
+out:
     pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

     amdgpu_virt_disable_access_debugfs(adev);
-    return result;
+    return r;
 }

 /**
@@ -679,12 +664,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
         uint32_t value;

         r = get_user(value, (uint32_t *)buf);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            amdgpu_virt_disable_access_debugfs(adev);
-            return r;
-        }
+        if (r)
+            goto out;

         WREG32_SMC(*pos, value);

@@ -694,11 +675,12 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
         size -= 4;
     }

+    r = result;
+out:
     pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

     amdgpu_virt_disable_access_debugfs(adev);
-    return result;
+    return r;
 }

 /**
@@ -1090,11 +1072,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
         uint32_t value;

         r = get_user(value, (uint32_t *)buf);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            return r;
-        }
+        if (r)
+            goto out;

         amdgpu_gfx_off_ctrl(adev, value ? true : false);

@@ -1104,10 +1083,12 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
         size -= 4;
     }

+    r = result;
+out:
     pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

-    return result;
+    return r;
 }


@@ -1139,18 +1120,12 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
         uint32_t value;

         r = amdgpu_get_gfx_off_status(adev, &value);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            return r;
-        }
+        if (r)
+            goto out;

         r = put_user(value, (uint32_t *)buf);
-        if (r) {
-            pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-            pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-            return r;
-        }
+        if (r)
+            goto out;

         result += 4;
         buf += 4;
@@ -1158,10 +1133,12 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
         size -= 4;
     }

+    r = result;
+out:
     pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

-    return result;
+    return r;
 }

 static const struct file_operations amdgpu_debugfs_regs2_fops = {
@@ -5109,7 +5109,8 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
  */

 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-                              struct amdgpu_job *job)
+                              struct amdgpu_job *job,
+                              struct amdgpu_reset_context *reset_context)
 {
     struct list_head device_list, *device_list_handle = NULL;
     bool job_signaled = false;
@@ -5119,9 +5120,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
     bool need_emergency_restart = false;
     bool audio_suspended = false;
     int tmp_vram_lost_counter;
-    struct amdgpu_reset_context reset_context;
-
-    memset(&reset_context, 0, sizeof(reset_context));

     /*
      * Special case: RAS triggered and full reset isn't supported
@@ -5147,12 +5145,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
     if (hive)
         mutex_lock(&hive->hive_lock);

-    reset_context.method = AMD_RESET_METHOD_NONE;
-    reset_context.reset_req_dev = adev;
-    reset_context.job = job;
-    reset_context.hive = hive;
-    clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-
+    reset_context->job = job;
+    reset_context->hive = hive;
     /*
      * Build list of devices to reset.
      * In case we are in XGMI hive mode, resort the device list
@@ -5245,7 +5239,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
     list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-        r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
+        r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
         /*TODO Should we stop ?*/
         if (r) {
             dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -5272,7 +5266,7 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
         if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
             amdgpu_ras_resume(adev);
     } else {
-        r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
+        r = amdgpu_do_asic_reset(device_list_handle, reset_context);
         if (r && r == -EAGAIN)
             goto retry;
     }
@@ -5292,7 +5286,7 @@ skip_hw_reset:
             if (amdgpu_gpu_recovery == 2 &&
                 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
                 amdgpu_device_recheck_guilty_jobs(
-                    tmp_adev, device_list_handle, &reset_context);
+                    tmp_adev, device_list_handle, reset_context);

         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
             struct amdgpu_ring *ring = tmp_adev->rings[i];
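Note: after this change, every caller of amdgpu_device_gpu_recover() builds its own struct amdgpu_reset_context. A minimal sketch of the new calling convention, assembled only from the fields the hunks above actually set (no extra fields are implied):

    struct amdgpu_reset_context reset_context;

    memset(&reset_context, 0, sizeof(reset_context));
    reset_context.method = AMD_RESET_METHOD_NONE;      /* let the reset core pick the method */
    reset_context.reset_req_dev = adev;                /* device requesting recovery */
    clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

    r = amdgpu_device_gpu_recover(adev, job, &reset_context);

Callers that want to force a full adapter reset (such as the debugfs reset worker below) use set_bit(AMDGPU_NEED_FULL_RESET, ...) instead; amdgpu_device_gpu_recover() itself now only fills in reset_context->job and reset_context->hive.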
@@ -1559,6 +1559,21 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
                                       stime, etime, mode);
 }

+static bool
+amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
+{
+    struct drm_device *dev = adev_to_drm(adev);
+    struct drm_fb_helper *fb_helper = dev->fb_helper;
+
+    if (!fb_helper || !fb_helper->buffer)
+        return false;
+
+    if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
+        return false;
+
+    return true;
+}
+
 int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
 {
     struct drm_device *dev = adev_to_drm(adev);
@@ -1594,10 +1609,12 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
             continue;
         }
         robj = gem_to_amdgpu_bo(fb->obj[0]);
-        r = amdgpu_bo_reserve(robj, true);
-        if (r == 0) {
-            amdgpu_bo_unpin(robj);
-            amdgpu_bo_unreserve(robj);
+        if (!amdgpu_display_robj_is_fb(adev, robj)) {
+            r = amdgpu_bo_reserve(robj, true);
+            if (r == 0) {
+                amdgpu_bo_unpin(robj);
+                amdgpu_bo_unreserve(robj);
+            }
         }
     }
     return 0;
@@ -844,7 +844,14 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work)
     struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                               reset_work);

-    amdgpu_device_gpu_recover(adev, NULL);
+    struct amdgpu_reset_context reset_context;
+    memset(&reset_context, 0, sizeof(reset_context));
+
+    reset_context.method = AMD_RESET_METHOD_NONE;
+    reset_context.reset_req_dev = adev;
+    set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+    amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }

 #endif
@@ -29,6 +29,7 @@

 #include "amdgpu.h"
 #include "amdgpu_trace.h"
+#include "amdgpu_reset.h"

 static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
@@ -64,7 +65,14 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
               ti.process_name, ti.tgid, ti.task_name, ti.pid);

     if (amdgpu_device_should_recover_gpu(ring->adev)) {
-        r = amdgpu_device_gpu_recover(ring->adev, job);
+        struct amdgpu_reset_context reset_context;
+        memset(&reset_context, 0, sizeof(reset_context));
+
+        reset_context.method = AMD_RESET_METHOD_NONE;
+        reset_context.reset_req_dev = adev;
+        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+        r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
         if (r)
             DRM_ERROR("GPU Recovery Failed: %d\n", r);
     } else {
@@ -114,8 +114,14 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
     size_t doorbell_start_offset;
     size_t doorbell_aperture_size;
     size_t doorbell_process_limit;
+    size_t aggregated_doorbell_start;
+    int i;

-    doorbell_start_offset = (adev->doorbell_index.max_assignment+1) * sizeof(u32);
+    aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
+    aggregated_doorbell_start =
+        roundup(aggregated_doorbell_start, PAGE_SIZE);
+
+    doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
     doorbell_start_offset =
         roundup(doorbell_start_offset,
             amdgpu_mes_doorbell_process_slice(adev));
@@ -135,6 +141,11 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
     adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
     adev->mes.max_doorbell_slices = doorbell_process_limit;

+    /* allocate Qword range for aggregated doorbell */
+    for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
+        adev->mes.aggregated_doorbells[i] =
+            aggregated_doorbell_start / sizeof(u32) + i * 2;
+
     DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
     return 0;
 }
@@ -150,6 +161,7 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
     idr_init(&adev->mes.queue_id_idr);
     ida_init(&adev->mes.doorbell_ida);
     spin_lock_init(&adev->mes.queue_id_lock);
+    spin_lock_init(&adev->mes.ring_lock);
     mutex_init(&adev->mes.mutex_hidden);

     adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
@@ -173,9 +185,6 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
         adev->mes.sdma_hqd_mask[i] = 0xfc;
     }

-    for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
-        adev->mes.agreegated_doorbells[i] = 0xffffffff;
-
     r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
     if (r) {
         dev_err(adev->dev,
@@ -716,6 +725,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
     queue->queue_type = qprops->queue_type;
     queue->paging = qprops->paging;
     queue->gang = gang;
+    queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
     list_add_tail(&queue->list, &gang->queue_list);

     amdgpu_mes_unlock(&adev->mes);
@@ -794,8 +804,6 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
     struct mes_unmap_legacy_queue_input queue_input;
     int r;

-    amdgpu_mes_lock(&adev->mes);
-
     queue_input.action = action;
     queue_input.queue_type = ring->funcs->type;
     queue_input.doorbell_offset = ring->doorbell_index;
@@ -808,7 +816,6 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
     if (r)
         DRM_ERROR("failed to unmap legacy queue\n");

-    amdgpu_mes_unlock(&adev->mes);
     return r;
 }

@@ -817,8 +824,6 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
     struct mes_misc_op_input op_input;
     int r, val = 0;

-    amdgpu_mes_lock(&adev->mes);
-
     op_input.op = MES_MISC_OP_READ_REG;
     op_input.read_reg.reg_offset = reg;
     op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;
@@ -835,7 +840,6 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
     val = *(adev->mes.read_val_ptr);

 error:
-    amdgpu_mes_unlock(&adev->mes);
     return val;
 }

@@ -845,8 +849,6 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
     struct mes_misc_op_input op_input;
     int r;

-    amdgpu_mes_lock(&adev->mes);
-
     op_input.op = MES_MISC_OP_WRITE_REG;
     op_input.write_reg.reg_offset = reg;
     op_input.write_reg.reg_value = val;
@@ -862,7 +864,6 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
         DRM_ERROR("failed to write reg (0x%x)\n", reg);

 error:
-    amdgpu_mes_unlock(&adev->mes);
     return r;
 }

@@ -873,8 +874,6 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
     struct mes_misc_op_input op_input;
     int r;

-    amdgpu_mes_lock(&adev->mes);
-
     op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
     op_input.wrm_reg.reg0 = reg0;
     op_input.wrm_reg.reg1 = reg1;
@@ -892,7 +891,6 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
         DRM_ERROR("failed to reg_write_reg_wait\n");

 error:
-    amdgpu_mes_unlock(&adev->mes);
     return r;
 }

@@ -902,8 +900,6 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
     struct mes_misc_op_input op_input;
     int r;

-    amdgpu_mes_lock(&adev->mes);
-
     op_input.op = MES_MISC_OP_WRM_REG_WAIT;
     op_input.wrm_reg.reg0 = reg;
     op_input.wrm_reg.ref = val;
@@ -920,7 +916,6 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
         DRM_ERROR("failed to reg_write_reg_wait\n");

 error:
-    amdgpu_mes_unlock(&adev->mes);
     return r;
 }

@@ -1087,6 +1082,12 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
     kfree(ring);
 }

+uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
+                                                  enum amdgpu_mes_priority_level prio)
+{
+    return adev->mes.aggregated_doorbells[prio];
+}
+
 int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
                                    struct amdgpu_mes_ctx_data *ctx_data)
 {
@@ -1188,6 +1189,63 @@ error:
     return r;
 }

+int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
+                                   struct amdgpu_mes_ctx_data *ctx_data)
+{
+    struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
+    struct amdgpu_bo *bo = ctx_data->meta_data_obj;
+    struct amdgpu_vm *vm = bo_va->base.vm;
+    struct amdgpu_bo_list_entry vm_pd;
+    struct list_head list, duplicates;
+    struct dma_fence *fence = NULL;
+    struct ttm_validate_buffer tv;
+    struct ww_acquire_ctx ticket;
+    long r = 0;
+
+    INIT_LIST_HEAD(&list);
+    INIT_LIST_HEAD(&duplicates);
+
+    tv.bo = &bo->tbo;
+    tv.num_shared = 2;
+    list_add(&tv.head, &list);
+
+    amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+    r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+    if (r) {
+        dev_err(adev->dev, "leaking bo va because "
+            "we fail to reserve bo (%ld)\n", r);
+        return r;
+    }
+
+    amdgpu_vm_bo_del(adev, bo_va);
+    if (!amdgpu_vm_ready(vm))
+        goto out_unlock;
+
+    r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+    if (r)
+        goto out_unlock;
+    if (fence) {
+        amdgpu_bo_fence(bo, fence, true);
+        fence = NULL;
+    }
+
+    r = amdgpu_vm_clear_freed(adev, vm, &fence);
+    if (r || !fence)
+        goto out_unlock;
+
+    dma_fence_wait(fence, false);
+    amdgpu_bo_fence(bo, fence, true);
+    dma_fence_put(fence);
+
+out_unlock:
+    if (unlikely(r < 0))
+        dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
+    ttm_eu_backoff_reservation(&ticket, &list);
+
+    return r;
+}
+
 static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
                                                   int pasid, int *gang_id,
                                                   int queue_type, int num_queue,
@@ -1294,7 +1352,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
     r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
     if (r) {
         DRM_ERROR("failed to alloc ctx meta data\n");
-        goto error_pasid;
+        goto error_fini;
     }

     ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
@@ -1349,9 +1407,9 @@ error_queues:
     amdgpu_mes_destroy_process(adev, pasid);

 error_vm:
-    BUG_ON(amdgpu_bo_reserve(ctx_data.meta_data_obj, true));
-    amdgpu_vm_bo_del(adev, ctx_data.meta_data_va);
-    amdgpu_bo_unreserve(ctx_data.meta_data_obj);
+    amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

 error_fini:
     amdgpu_vm_fini(adev, vm);

 error_pasid:
@@ -83,6 +83,7 @@ struct amdgpu_mes {
     uint64_t                        default_gang_quantum;

     struct amdgpu_ring              ring;
+    spinlock_t                      ring_lock;

     const struct firmware           *fw[AMDGPU_MAX_MES_PIPES];

@@ -112,7 +113,7 @@ struct amdgpu_mes {
     uint32_t                        compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
     uint32_t                        gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
     uint32_t                        sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
-    uint32_t                        agreegated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
+    uint32_t                        aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
     uint32_t                        sch_ctx_offs;
     uint64_t                        sch_ctx_gpu_addr;
    uint64_t                        *sch_ctx_ptr;
@@ -346,12 +347,17 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
 void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
                             struct amdgpu_ring *ring);

+uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
+                                                  enum amdgpu_mes_priority_level prio);
+
 int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
                                    struct amdgpu_mes_ctx_data *ctx_data);
 void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
 int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm,
                                  struct amdgpu_mes_ctx_data *ctx_data);
+int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
+                                   struct amdgpu_mes_ctx_data *ctx_data);

 int amdgpu_mes_self_test(struct amdgpu_device *adev);

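With the renamed array kept behind an accessor, ring code looks up the per-priority aggregated doorbell instead of indexing adev->mes directly. A short usage sketch matching the gfx ring hunks later in this merge:

    /* in a set_wptr path for a MES-mapped queue */
    uint64_t aggregated_db_index =
        amdgpu_mes_get_aggregated_doorbell_index(adev, ring->hw_prio);

    WDOORBELL64(aggregated_db_index, wptr_tmp);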
@@ -882,6 +882,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
     if (WARN_ON_ONCE(min_offset > max_offset))
         return -EINVAL;

+    /* Check domain to be pinned to against preferred domains */
+    if (bo->preferred_domains & domain)
+        domain = bo->preferred_domains & domain;
+
     /* A shared bo cannot be migrated to VRAM */
     if (bo->tbo.base.import_attach) {
         if (domain & AMDGPU_GEM_DOMAIN_GTT)
@@ -717,27 +717,30 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
     if (!con)
         return -EINVAL;

-    info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
-    if (!info)
-        return -ENOMEM;
+    if (head->block == AMDGPU_RAS_BLOCK__GFX) {
+        info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
+        if (!info)
+            return -ENOMEM;

-    if (!enable) {
-        info->disable_features = (struct ta_ras_disable_features_input) {
-            .block_id = amdgpu_ras_block_to_ta(head->block),
-            .error_type = amdgpu_ras_error_to_ta(head->type),
-        };
-    } else {
-        info->enable_features = (struct ta_ras_enable_features_input) {
-            .block_id = amdgpu_ras_block_to_ta(head->block),
-            .error_type = amdgpu_ras_error_to_ta(head->type),
-        };
+        if (!enable) {
+            info->disable_features = (struct ta_ras_disable_features_input) {
+                .block_id = amdgpu_ras_block_to_ta(head->block),
+                .error_type = amdgpu_ras_error_to_ta(head->type),
+            };
+        } else {
+            info->enable_features = (struct ta_ras_enable_features_input) {
+                .block_id = amdgpu_ras_block_to_ta(head->block),
+                .error_type = amdgpu_ras_error_to_ta(head->type),
+            };
+        }
     }

     /* Do not enable if it is not allowed. */
     WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));

     /* Only enable ras feature operation handle on host side */
-    if (!amdgpu_sriov_vf(adev) &&
+    if (head->block == AMDGPU_RAS_BLOCK__GFX &&
+        !amdgpu_sriov_vf(adev) &&
         !amdgpu_ras_intr_triggered()) {
         ret = psp_ras_enable_features(&adev->psp, info, enable);
         if (ret) {
@@ -753,7 +756,8 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
     __amdgpu_ras_feature_enable(adev, head, enable);
     ret = 0;
 out:
-    kfree(info);
+    if (head->block == AMDGPU_RAS_BLOCK__GFX)
+        kfree(info);
     return ret;
 }

@@ -1938,8 +1942,16 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
         amdgpu_put_xgmi_hive(hive);
     }

-    if (amdgpu_device_should_recover_gpu(ras->adev))
-        amdgpu_device_gpu_recover(ras->adev, NULL);
+    if (amdgpu_device_should_recover_gpu(ras->adev)) {
+        struct amdgpu_reset_context reset_context;
+        memset(&reset_context, 0, sizeof(reset_context));
+
+        reset_context.method = AMD_RESET_METHOD_NONE;
+        reset_context.reset_req_dev = adev;
+        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+        amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
+    }
     atomic_set(&ras->in_recovery, 0);
 }

@@ -2150,7 +2162,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
     bool exc_err_limit = false;
     int ret;

-    if (!con)
+    if (!con || amdgpu_sriov_vf(adev))
         return 0;

     /* Allow access to RAS EEPROM via debugfs, when the ASIC
@@ -496,7 +496,8 @@ static int amdgpu_vkms_sw_init(void *handle)
     adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

     adev_to_drm(adev)->mode_config.preferred_depth = 24;
-    adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+    /* disable prefer shadow for now due to hibernation issues */
+    adev_to_drm(adev)->mode_config.prefer_shadow = 0;

     adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

@@ -2168,6 +2168,14 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
     } else {
         vm->update_funcs = &amdgpu_vm_sdma_funcs;
     }
+    /*
+     * Make sure root PD gets mapped. As vm_update_mode could be changed
+     * when turning a GFX VM into a compute VM.
+     */
+    r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo));
+    if (r)
+        goto unreserve_bo;
+
     dma_fence_put(vm->last_update);
     vm->last_update = NULL;
     vm->is_compute_context = true;
@@ -2796,7 +2796,8 @@ static int dce_v10_0_sw_init(void *handle)
     adev_to_drm(adev)->mode_config.max_height = 16384;

     adev_to_drm(adev)->mode_config.preferred_depth = 24;
-    adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+    /* disable prefer shadow for now due to hibernation issues */
+    adev_to_drm(adev)->mode_config.prefer_shadow = 0;

     adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@@ -2914,7 +2914,8 @@ static int dce_v11_0_sw_init(void *handle)
     adev_to_drm(adev)->mode_config.max_height = 16384;

     adev_to_drm(adev)->mode_config.preferred_depth = 24;
-    adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+    /* disable prefer shadow for now due to hibernation issues */
+    adev_to_drm(adev)->mode_config.prefer_shadow = 0;

     adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@@ -2673,7 +2673,8 @@ static int dce_v6_0_sw_init(void *handle)
     adev_to_drm(adev)->mode_config.max_width = 16384;
     adev_to_drm(adev)->mode_config.max_height = 16384;
     adev_to_drm(adev)->mode_config.preferred_depth = 24;
-    adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+    /* disable prefer shadow for now due to hibernation issues */
+    adev_to_drm(adev)->mode_config.prefer_shadow = 0;
     adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
     adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

@@ -2693,7 +2693,8 @@ static int dce_v8_0_sw_init(void *handle)
     adev_to_drm(adev)->mode_config.max_height = 16384;

     adev_to_drm(adev)->mode_config.preferred_depth = 24;
-    adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+    /* disable prefer shadow for now due to hibernation issues */
+    adev_to_drm(adev)->mode_config.prefer_shadow = 0;

     adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@@ -8525,14 +8525,45 @@ static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
 static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
 {
     struct amdgpu_device *adev = ring->adev;
+    uint32_t *wptr_saved;
+    uint32_t *is_queue_unmap;
+    uint64_t aggregated_db_index;
+    uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
+    uint64_t wptr_tmp;

-    if (ring->use_doorbell) {
-        /* XXX check if swapping is necessary on BE */
-        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
-        WDOORBELL64(ring->doorbell_index, ring->wptr);
+    if (ring->is_mes_queue) {
+        wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
+        is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
+                                      sizeof(uint32_t));
+        aggregated_db_index =
+            amdgpu_mes_get_aggregated_doorbell_index(adev,
+                                                     AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
+
+        wptr_tmp = ring->wptr & ring->buf_mask;
+        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
+        *wptr_saved = wptr_tmp;
+        /* assume doorbell always being used by mes mapped queue */
+        if (*is_queue_unmap) {
+            WDOORBELL64(aggregated_db_index, wptr_tmp);
+            WDOORBELL64(ring->doorbell_index, wptr_tmp);
+        } else {
+            WDOORBELL64(ring->doorbell_index, wptr_tmp);
+
+            if (*is_queue_unmap)
+                WDOORBELL64(aggregated_db_index, wptr_tmp);
+        }
     } else {
-        WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
-        WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
+        if (ring->use_doorbell) {
+            /* XXX check if swapping is necessary on BE */
+            atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+                         ring->wptr);
+            WDOORBELL64(ring->doorbell_index, ring->wptr);
+        } else {
+            WREG32_SOC15(GC, 0, mmCP_RB0_WPTR,
+                         lower_32_bits(ring->wptr));
+            WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI,
+                         upper_32_bits(ring->wptr));
+        }
     }
 }

@@ -8557,13 +8588,42 @@ static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 {
     struct amdgpu_device *adev = ring->adev;
+    uint32_t *wptr_saved;
+    uint32_t *is_queue_unmap;
+    uint64_t aggregated_db_index;
+    uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
+    uint64_t wptr_tmp;

-    /* XXX check if swapping is necessary on BE */
-    if (ring->use_doorbell) {
-        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
-        WDOORBELL64(ring->doorbell_index, ring->wptr);
+    if (ring->is_mes_queue) {
+        wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
+        is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
+                                      sizeof(uint32_t));
+        aggregated_db_index =
+            amdgpu_mes_get_aggregated_doorbell_index(adev,
+                                                     AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
+
+        wptr_tmp = ring->wptr & ring->buf_mask;
+        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
+        *wptr_saved = wptr_tmp;
+        /* assume doorbell always used by mes mapped queue */
+        if (*is_queue_unmap) {
+            WDOORBELL64(aggregated_db_index, wptr_tmp);
+            WDOORBELL64(ring->doorbell_index, wptr_tmp);
+        } else {
+            WDOORBELL64(ring->doorbell_index, wptr_tmp);
+
+            if (*is_queue_unmap)
+                WDOORBELL64(aggregated_db_index, wptr_tmp);
+        }
     } else {
-        BUG(); /* only DOORBELL method supported on gfx10 now */
+        /* XXX check if swapping is necessary on BE */
+        if (ring->use_doorbell) {
+            atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+                         ring->wptr);
+            WDOORBELL64(ring->doorbell_index, ring->wptr);
+        } else {
+            BUG(); /* only DOORBELL method supported on gfx10 now */
+        }
     }
 }

@@ -126,6 +126,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
                                            uint16_t pasid, uint32_t flush_type,
                                            bool all_hub, uint8_t dst_sel);
+static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);

 static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -4743,63 +4745,143 @@ static int gfx_v11_0_soft_reset(void *handle)
 {
     u32 grbm_soft_reset = 0;
     u32 tmp;
+    int i, j, k;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-    /* GRBM_STATUS */
-    tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS);
-    if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
-               GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
-               GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
-               GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
-               GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) {
-        grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
-                                        GRBM_SOFT_RESET, SOFT_RESET_CP,
-                                        1);
-        grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
-                                        GRBM_SOFT_RESET, SOFT_RESET_GFX,
-                                        1);
-    }
+    tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
+    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
+    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
+    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
+    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
+    WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);

-    if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
-        grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
-                                        GRBM_SOFT_RESET, SOFT_RESET_CP,
-                                        1);
-    }
+    gfx_v11_0_set_safe_mode(adev);
+
+    for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+        for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+            for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+                tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
+                tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
+                tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
+                tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
+                WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
+
+                WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+                WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+            }
+        }
+    }
+    for (i = 0; i < adev->gfx.me.num_me; ++i) {
+        for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
+            for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+                tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
+                tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
+                tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
+                tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
+                WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
+
+                WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
+            }
+        }
+    }
+
+    WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
+
+    // Read CP_VMID_RESET register three times.
+    // to get sufficient time for GFX_HQD_ACTIVE reach 0
+    RREG32_SOC15(GC, 0, regCP_VMID_RESET);
+    RREG32_SOC15(GC, 0, regCP_VMID_RESET);
+    RREG32_SOC15(GC, 0, regCP_VMID_RESET);
+
+    for (i = 0; i < adev->usec_timeout; i++) {
+        if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
+            !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
+            break;
+        udelay(1);
+    }
+    if (i >= adev->usec_timeout) {
+        printk("Failed to wait all pipes clean\n");
+        return -EINVAL;
+    }

-    /* GRBM_STATUS2 */
-    tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS2);
-    if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
-        grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
-                                        GRBM_SOFT_RESET,
-                                        SOFT_RESET_RLC,
-                                        1);
+    /**********  trigger soft reset  ***********/
+    grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_CP, 1);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_GFX, 1);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_CPF, 1);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_CPC, 1);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_CPG, 1);
+    WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
+    /**********  exit soft reset  ***********/
+    grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_CP, 0);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_GFX, 0);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_CPF, 0);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_CPC, 0);
+    grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+                                    SOFT_RESET_CPG, 0);
+    WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);

-    if (grbm_soft_reset) {
-        /* stop the rlc */
-        gfx_v11_0_rlc_stop(adev);
+    tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
+    tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
+    WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);

-        /* Disable GFX parsing/prefetching */
-        gfx_v11_0_cp_gfx_enable(adev, false);
+    WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
+    WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);

-        /* Disable MEC parsing/prefetching */
-        gfx_v11_0_cp_compute_enable(adev, false);
-
-        tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
-        tmp |= grbm_soft_reset;
-        dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
-        WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
-        tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
-
-        udelay(50);
-
-        tmp &= ~grbm_soft_reset;
-        WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
-        tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
-
-        /* Wait a little for things to settle down */
-        udelay(50);
-    }
-    return 0;
+    for (i = 0; i < adev->usec_timeout; i++) {
+        if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
+            break;
+        udelay(1);
+    }
+    if (i >= adev->usec_timeout) {
+        printk("Failed to wait CP_VMID_RESET to 0\n");
+        return -EINVAL;
+    }
+
+    tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
+    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
+    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
+    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
+    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
+    WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
+
+    gfx_v11_0_unset_safe_mode(adev);
+
+    return gfx_v11_0_cp_resume(adev);
 }

+static bool gfx_v11_0_check_soft_reset(void *handle)
+{
+    int i, r;
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    struct amdgpu_ring *ring;
+    long tmo = msecs_to_jiffies(1000);
+
+    for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+        ring = &adev->gfx.gfx_ring[i];
+        r = amdgpu_ring_test_ib(ring, tmo);
+        if (r)
+            return true;
+    }
+
+    for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+        ring = &adev->gfx.compute_ring[i];
+        r = amdgpu_ring_test_ib(ring, tmo);
+        if (r)
+            return true;
+    }
+
+    return false;
+}
+
 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
@@ -5297,14 +5379,45 @@ static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
 {
     struct amdgpu_device *adev = ring->adev;
+    uint32_t *wptr_saved;
+    uint32_t *is_queue_unmap;
+    uint64_t aggregated_db_index;
+    uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
+    uint64_t wptr_tmp;

-    if (ring->use_doorbell) {
-        /* XXX check if swapping is necessary on BE */
-        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
-        WDOORBELL64(ring->doorbell_index, ring->wptr);
+    if (ring->is_mes_queue) {
+        wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
+        is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
+                                      sizeof(uint32_t));
+        aggregated_db_index =
+            amdgpu_mes_get_aggregated_doorbell_index(adev,
+                                                     ring->hw_prio);
+
+        wptr_tmp = ring->wptr & ring->buf_mask;
+        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
+        *wptr_saved = wptr_tmp;
+        /* assume doorbell always being used by mes mapped queue */
+        if (*is_queue_unmap) {
+            WDOORBELL64(aggregated_db_index, wptr_tmp);
+            WDOORBELL64(ring->doorbell_index, wptr_tmp);
+        } else {
+            WDOORBELL64(ring->doorbell_index, wptr_tmp);
+
+            if (*is_queue_unmap)
+                WDOORBELL64(aggregated_db_index, wptr_tmp);
+        }
     } else {
-        WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
-        WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
+        if (ring->use_doorbell) {
+            /* XXX check if swapping is necessary on BE */
+            atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+                         ring->wptr);
+            WDOORBELL64(ring->doorbell_index, ring->wptr);
+        } else {
+            WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
+                         lower_32_bits(ring->wptr));
+            WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
+                         upper_32_bits(ring->wptr));
+        }
     }
 }

@@ -5329,13 +5442,42 @@ static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 {
     struct amdgpu_device *adev = ring->adev;
+    uint32_t *wptr_saved;
+    uint32_t *is_queue_unmap;
+    uint64_t aggregated_db_index;
+    uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
+    uint64_t wptr_tmp;

-    /* XXX check if swapping is necessary on BE */
-    if (ring->use_doorbell) {
-        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
-        WDOORBELL64(ring->doorbell_index, ring->wptr);
+    if (ring->is_mes_queue) {
+        wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
+        is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
+                                      sizeof(uint32_t));
+        aggregated_db_index =
+            amdgpu_mes_get_aggregated_doorbell_index(adev,
+                                                     ring->hw_prio);
+
+        wptr_tmp = ring->wptr & ring->buf_mask;
+        atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
+        *wptr_saved = wptr_tmp;
+        /* assume doorbell always used by mes mapped queue */
+        if (*is_queue_unmap) {
+            WDOORBELL64(aggregated_db_index, wptr_tmp);
+            WDOORBELL64(ring->doorbell_index, wptr_tmp);
+        } else {
+            WDOORBELL64(ring->doorbell_index, wptr_tmp);
+
+            if (*is_queue_unmap)
+                WDOORBELL64(aggregated_db_index, wptr_tmp);
+        }
     } else {
-        BUG(); /* only DOORBELL method supported on gfx11 now */
+        /* XXX check if swapping is necessary on BE */
+        if (ring->use_doorbell) {
+            atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+                         ring->wptr);
+            WDOORBELL64(ring->doorbell_index, ring->wptr);
+        } else {
+            BUG(); /* only DOORBELL method supported on gfx11 now */
+        }
     }
 }

@@ -6132,6 +6274,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
     .is_idle = gfx_v11_0_is_idle,
     .wait_for_idle = gfx_v11_0_wait_for_idle,
     .soft_reset = gfx_v11_0_soft_reset,
+    .check_soft_reset = gfx_v11_0_check_soft_reset,
     .set_clockgating_state = gfx_v11_0_set_clockgating_state,
    .set_powergating_state = gfx_v11_0_set_powergating_state,
    .get_clockgating_state = gfx_v11_0_get_clockgating_state,
@@ -834,10 +834,21 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
         adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

     /* set the gart size */
-    if (amdgpu_gart_size == -1)
-        adev->gmc.gart_size = 512ULL << 20;
-    else
+    if (amdgpu_gart_size == -1) {
+        switch (adev->ip_versions[GC_HWIP][0]) {
+        default:
+            adev->gmc.gart_size = 512ULL << 20;
+            break;
+        case IP_VERSION(10, 3, 1):   /* DCE SG support */
+        case IP_VERSION(10, 3, 3):   /* DCE SG support */
+        case IP_VERSION(10, 3, 6):   /* DCE SG support */
+        case IP_VERSION(10, 3, 7):   /* DCE SG support */
+            adev->gmc.gart_size = 1024ULL << 20;
+            break;
+        }
+    } else {
         adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
+    }

     gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

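The new sizing only kicks in when amdgpu.gart_size is left at its default of -1 ("auto"); an explicit module parameter still overrides it. A condensed view of the decision (has_sg_display below is shorthand for the GC 10.3.1/10.3.3/10.3.6/10.3.7 cases in the hunk, not a real driver variable):

    if (amdgpu_gart_size == -1)
        /* APUs with scatter/gather display get 1 GiB, others keep 512 MiB */
        adev->gmc.gart_size = has_sg_display ? (1024ULL << 20) : (512ULL << 20);
    else
        adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;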
@ -87,21 +87,32 @@ static const struct amdgpu_ring_funcs mes_v10_1_ring_funcs = {
|
||||
};
|
||||
|
||||
static int mes_v10_1_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
|
||||
void *pkt, int size)
|
||||
void *pkt, int size,
|
||||
int api_status_off)
|
||||
{
|
||||
int ndw = size / 4;
|
||||
signed long r;
|
||||
union MESAPI__ADD_QUEUE *x_pkt = pkt;
|
||||
struct MES_API_STATUS *api_status;
|
||||
struct amdgpu_device *adev = mes->adev;
|
||||
struct amdgpu_ring *ring = &mes->ring;
|
||||
unsigned long flags;
|
||||
|
||||
BUG_ON(size % 4 != 0);
|
||||
|
||||
if (amdgpu_ring_alloc(ring, ndw))
|
||||
spin_lock_irqsave(&mes->ring_lock, flags);
|
||||
if (amdgpu_ring_alloc(ring, ndw)) {
|
||||
spin_unlock_irqrestore(&mes->ring_lock, flags);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
|
||||
api_status->api_completion_fence_addr = mes->ring.fence_drv.gpu_addr;
|
||||
api_status->api_completion_fence_value = ++mes->ring.fence_drv.sync_seq;
|
||||
|
||||
amdgpu_ring_write_multiple(ring, pkt, ndw);
|
||||
amdgpu_ring_commit(ring);
|
||||
spin_unlock_irqrestore(&mes->ring_lock, flags);
|
||||
|
||||
DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);
|
||||
|
||||
@ -166,13 +177,9 @@ static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes,
|
||||
mes_add_queue_pkt.gws_size = input->gws_size;
|
||||
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
|
||||
|
||||
mes_add_queue_pkt.api_status.api_completion_fence_addr =
|
||||
mes->ring.fence_drv.gpu_addr;
|
||||
mes_add_queue_pkt.api_status.api_completion_fence_value =
|
||||
++mes->ring.fence_drv.sync_seq;
|
||||
|
||||
return mes_v10_1_submit_pkt_and_poll_completion(mes,
|
||||
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt));
|
||||
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
|
||||
offsetof(union MESAPI__ADD_QUEUE, api_status));
|
||||
}
|
||||
|
||||
static int mes_v10_1_remove_hw_queue(struct amdgpu_mes *mes,
|
||||
@ -189,13 +196,9 @@ static int mes_v10_1_remove_hw_queue(struct amdgpu_mes *mes,
|
||||
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
|
||||
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
|
||||
|
||||
mes_remove_queue_pkt.api_status.api_completion_fence_addr =
|
||||
mes->ring.fence_drv.gpu_addr;
|
||||
mes_remove_queue_pkt.api_status.api_completion_fence_value =
|
||||
++mes->ring.fence_drv.sync_seq;
|
||||
|
||||
return mes_v10_1_submit_pkt_and_poll_completion(mes,
|
||||
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt));
|
||||
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
|
||||
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
|
||||
}
|
||||
|
||||
static int mes_v10_1_unmap_legacy_queue(struct amdgpu_mes *mes,
|
||||
@ -227,13 +230,9 @@ static int mes_v10_1_unmap_legacy_queue(struct amdgpu_mes *mes,
|
||||
mes_remove_queue_pkt.unmap_kiq_utility_queue = 1;
|
||||
}
|
||||
|
||||
mes_remove_queue_pkt.api_status.api_completion_fence_addr =
|
||||
mes->ring.fence_drv.gpu_addr;
|
||||
mes_remove_queue_pkt.api_status.api_completion_fence_value =
|
||||
++mes->ring.fence_drv.sync_seq;
|
||||
|
||||
return mes_v10_1_submit_pkt_and_poll_completion(mes,
|
||||
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt));
|
||||
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
|
||||
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
|
||||
}
|
||||
|
||||
static int mes_v10_1_suspend_gang(struct amdgpu_mes *mes,
@ -258,13 +257,9 @@ static int mes_v10_1_query_sched_status(struct amdgpu_mes *mes)
    mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
    mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
|
    mes_status_pkt.api_status.api_completion_fence_addr =
        mes->ring.fence_drv.gpu_addr;
    mes_status_pkt.api_status.api_completion_fence_value =
        ++mes->ring.fence_drv.sync_seq;
|
    return mes_v10_1_submit_pkt_and_poll_completion(mes,
            &mes_status_pkt, sizeof(mes_status_pkt));
            &mes_status_pkt, sizeof(mes_status_pkt),
            offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}
|
static int mes_v10_1_set_hw_resources(struct amdgpu_mes *mes)
@ -299,7 +294,7 @@ static int mes_v10_1_set_hw_resources(struct amdgpu_mes *mes)
|
    for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
        mes_set_hw_res_pkt.aggregated_doorbells[i] =
            mes->agreegated_doorbells[i];
            mes->aggregated_doorbells[i];
|
    for (i = 0; i < 5; i++) {
        mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
@ -313,13 +308,63 @@ static int mes_v10_1_set_hw_resources(struct amdgpu_mes *mes)
    mes_set_hw_res_pkt.disable_mes_log = 1;
    mes_set_hw_res_pkt.use_different_vmid_compute = 1;
|
    mes_set_hw_res_pkt.api_status.api_completion_fence_addr =
        mes->ring.fence_drv.gpu_addr;
    mes_set_hw_res_pkt.api_status.api_completion_fence_value =
        ++mes->ring.fence_drv.sync_seq;
|
    return mes_v10_1_submit_pkt_and_poll_completion(mes,
            &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt));
            &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
            offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
|
static void mes_v10_1_init_aggregated_doorbell(struct amdgpu_mes *mes)
{
    struct amdgpu_device *adev = mes->adev;
    uint32_t data;
|
    data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL1);
    data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] <<
        CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL1, data);
|
    data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL2);
    data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] <<
        CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL2, data);
|
    data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL3);
    data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] <<
        CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL3, data);
|
    data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL4);
    data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] <<
        CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL4, data);
|
    data = RREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL5);
    data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] <<
        CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, mmCP_MES_DOORBELL_CONTROL5, data);
|
    data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT;
    WREG32_SOC15(GC, 0, mmCP_HQD_GFX_CONTROL, data);
}
|
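The two init_aggregated_doorbell routines in this series program one CP_MES_DOORBELL_CONTROLn register per MES priority level with the same read-modify-write shape. Below is a minimal standalone sketch of that field update; the mask and shift values are illustrative stand-ins, not the real GC register definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real masks/shifts come from the GC register headers. */
#define DOORBELL_OFFSET_MASK  0x0ffffffcu
#define DOORBELL_OFFSET_SHIFT 2
#define DOORBELL_EN_MASK      0x40000000u
#define DOORBELL_EN_SHIFT     30
#define DOORBELL_HIT_MASK     0x80000000u

/* Clear the offset/en/hit fields, then program the new offset and set enable. */
static uint32_t program_aggregated_doorbell(uint32_t reg, uint32_t db_index)
{
    reg &= ~(DOORBELL_OFFSET_MASK | DOORBELL_EN_MASK | DOORBELL_HIT_MASK);
    reg |= db_index << DOORBELL_OFFSET_SHIFT;
    reg |= 1u << DOORBELL_EN_SHIFT;
    return reg;
}

int main(void)
{
    uint32_t regs[5] = {0}; /* one register per priority level, LOW..REALTIME */
    for (int prio = 0; prio < 5; prio++)
        regs[prio] = program_aggregated_doorbell(regs[prio], 0x100u + prio);
    printf("CONTROL1 = 0x%08x\n", (unsigned)regs[0]);
    return 0;
}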
static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
@ -1121,6 +1166,8 @@ static int mes_v10_1_hw_init(void *handle)
    if (r)
        goto failure;
|
    mes_v10_1_init_aggregated_doorbell(&adev->mes);
|
    r = mes_v10_1_query_sched_status(&adev->mes);
    if (r) {
        DRM_ERROR("MES is busy\n");
@ -1133,6 +1180,7 @@ static int mes_v10_1_hw_init(void *handle)
     * with MES enabled.
     */
    adev->gfx.kiq.ring.sched.ready = false;
    adev->mes.ring.sched.ready = true;
|
    return 0;
|
@ -1145,6 +1193,8 @@ static int mes_v10_1_hw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
    adev->mes.ring.sched.ready = false;
|
    mes_v10_1_enable(adev, false);
|
    if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
|
@ -86,21 +86,32 @@ static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = {
};
|
static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
        void *pkt, int size)
        void *pkt, int size,
        int api_status_off)
{
    int ndw = size / 4;
    signed long r;
    union MESAPI__ADD_QUEUE *x_pkt = pkt;
    struct MES_API_STATUS *api_status;
    struct amdgpu_device *adev = mes->adev;
    struct amdgpu_ring *ring = &mes->ring;
    unsigned long flags;
|
    BUG_ON(size % 4 != 0);
|
    if (amdgpu_ring_alloc(ring, ndw))
    spin_lock_irqsave(&mes->ring_lock, flags);
    if (amdgpu_ring_alloc(ring, ndw)) {
        spin_unlock_irqrestore(&mes->ring_lock, flags);
        return -ENOMEM;
    }
|
    api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
    api_status->api_completion_fence_addr = mes->ring.fence_drv.gpu_addr;
    api_status->api_completion_fence_value = ++mes->ring.fence_drv.sync_seq;
|
    amdgpu_ring_write_multiple(ring, pkt, ndw);
    amdgpu_ring_commit(ring);
    spin_unlock_irqrestore(&mes->ring_lock, flags);
|
    DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);
|
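The reworked submit helper above is why every caller now passes an offsetof(): each MES API union keeps its MES_API_STATUS member at a different offset, so the helper patches the completion fence generically while holding the ring lock. A compilable userspace sketch of just the offset-based patching, with simplified stand-in types rather than the real firmware structs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the MES packet layouts. */
struct MES_API_STATUS {
    uint64_t api_completion_fence_addr;
    uint64_t api_completion_fence_value;
};

union MESAPI__QUERY_MES_STATUS {
    struct {
        uint32_t header;
        struct MES_API_STATUS api_status;
    };
    uint32_t max_dwords[16];
};

static uint64_t sync_seq;                      /* models mes->ring.fence_drv.sync_seq */
static const uint64_t fence_gpu_addr = 0x1000; /* models fence_drv.gpu_addr */

/* Patch the completion fence through the caller-supplied offset, as the
 * submit helper does for every packet type while holding the ring lock. */
static void submit_pkt(void *pkt, size_t size, size_t api_status_off)
{
    struct MES_API_STATUS *st = (struct MES_API_STATUS *)((char *)pkt + api_status_off);

    st->api_completion_fence_addr = fence_gpu_addr;
    st->api_completion_fence_value = ++sync_seq;
    (void)size; /* ...then size/4 dwords would go onto the ring... */
}

int main(void)
{
    union MESAPI__QUERY_MES_STATUS pkt;

    memset(&pkt, 0, sizeof(pkt));
    submit_pkt(&pkt, sizeof(pkt), offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
    printf("fence value %llu\n",
           (unsigned long long)pkt.api_status.api_completion_fence_value);
    return 0;
}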
@ -173,13 +184,9 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
    mes_add_queue_pkt.tma_addr = input->tma_addr;
    mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
|
    mes_add_queue_pkt.api_status.api_completion_fence_addr =
        mes->ring.fence_drv.gpu_addr;
    mes_add_queue_pkt.api_status.api_completion_fence_value =
        ++mes->ring.fence_drv.sync_seq;
|
    return mes_v11_0_submit_pkt_and_poll_completion(mes,
            &mes_add_queue_pkt, sizeof(mes_add_queue_pkt));
            &mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
            offsetof(union MESAPI__ADD_QUEUE, api_status));
}
|
static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
@ -196,13 +203,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
    mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
    mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
|
    mes_remove_queue_pkt.api_status.api_completion_fence_addr =
        mes->ring.fence_drv.gpu_addr;
    mes_remove_queue_pkt.api_status.api_completion_fence_value =
        ++mes->ring.fence_drv.sync_seq;
|
    return mes_v11_0_submit_pkt_and_poll_completion(mes,
            &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt));
            &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
            offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
|
static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
@ -216,7 +219,7 @@ static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
    mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
    mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
|
    mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset << 2;
    mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
    mes_remove_queue_pkt.gang_context_addr = 0;
|
    mes_remove_queue_pkt.pipe_id = input->pipe_id;
@ -228,19 +231,14 @@ static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
        mes_remove_queue_pkt.tf_data =
            lower_32_bits(input->trail_fence_data);
    } else {
        if (input->queue_type == AMDGPU_RING_TYPE_GFX)
            mes_remove_queue_pkt.unmap_legacy_gfx_queue = 1;
        else
            mes_remove_queue_pkt.unmap_kiq_utility_queue = 1;
        mes_remove_queue_pkt.unmap_legacy_queue = 1;
        mes_remove_queue_pkt.queue_type =
            convert_to_mes_queue_type(input->queue_type);
    }
|
    mes_remove_queue_pkt.api_status.api_completion_fence_addr =
        mes->ring.fence_drv.gpu_addr;
    mes_remove_queue_pkt.api_status.api_completion_fence_value =
        ++mes->ring.fence_drv.sync_seq;
|
    return mes_v11_0_submit_pkt_and_poll_completion(mes,
            &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt));
            &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
            offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
|
static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
@ -265,13 +263,9 @@ static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
    mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
    mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
|
    mes_status_pkt.api_status.api_completion_fence_addr =
        mes->ring.fence_drv.gpu_addr;
    mes_status_pkt.api_status.api_completion_fence_value =
        ++mes->ring.fence_drv.sync_seq;
|
    return mes_v11_0_submit_pkt_and_poll_completion(mes,
            &mes_status_pkt, sizeof(mes_status_pkt));
            &mes_status_pkt, sizeof(mes_status_pkt),
            offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}
|
static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
@ -317,13 +311,9 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
        return -EINVAL;
    }
|
    misc_pkt.api_status.api_completion_fence_addr =
        mes->ring.fence_drv.gpu_addr;
    misc_pkt.api_status.api_completion_fence_value =
        ++mes->ring.fence_drv.sync_seq;
|
    return mes_v11_0_submit_pkt_and_poll_completion(mes,
            &misc_pkt, sizeof(misc_pkt));
            &misc_pkt, sizeof(misc_pkt),
            offsetof(union MESAPI__MISC, api_status));
}
|
static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
@ -358,7 +348,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
|
    for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
        mes_set_hw_res_pkt.aggregated_doorbells[i] =
            mes->agreegated_doorbells[i];
            mes->aggregated_doorbells[i];
|
    for (i = 0; i < 5; i++) {
        mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
@ -373,13 +363,63 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
    mes_set_hw_res_pkt.use_different_vmid_compute = 1;
    mes_set_hw_res_pkt.oversubscription_timer = 50;
|
    mes_set_hw_res_pkt.api_status.api_completion_fence_addr =
        mes->ring.fence_drv.gpu_addr;
    mes_set_hw_res_pkt.api_status.api_completion_fence_value =
        ++mes->ring.fence_drv.sync_seq;
|
    return mes_v11_0_submit_pkt_and_poll_completion(mes,
            &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt));
            &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
            offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
|
static void mes_v11_0_init_aggregated_doorbell(struct amdgpu_mes *mes)
{
    struct amdgpu_device *adev = mes->adev;
    uint32_t data;
|
    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1);
    data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] <<
        CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1, data);
|
    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2);
    data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] <<
        CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2, data);
|
    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3);
    data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] <<
        CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3, data);
|
    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4);
    data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] <<
        CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4, data);
|
    data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5);
    data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK |
          CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK |
          CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK);
    data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] <<
        CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT;
    data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5, data);
|
    data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT;
    WREG32_SOC15(GC, 0, regCP_HQD_GFX_CONTROL, data);
}
|
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
@ -1181,6 +1221,8 @@ static int mes_v11_0_hw_init(void *handle)
    if (r)
        goto failure;
|
    mes_v11_0_init_aggregated_doorbell(&adev->mes);
|
    r = mes_v11_0_query_sched_status(&adev->mes);
    if (r) {
        DRM_ERROR("MES is busy\n");
@ -1204,6 +1246,9 @@ failure:
|
static int mes_v11_0_hw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
    adev->mes.ring.sched.ready = false;
    return 0;
}
|
@ -283,8 +283,16 @@ flr_done:
    /* Trigger recovery for world switch failure if no TDR */
    if (amdgpu_device_should_recover_gpu(adev)
        && (!amdgpu_device_has_job_running(adev) ||
        adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
        amdgpu_device_gpu_recover(adev, NULL);
        adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
        struct amdgpu_reset_context reset_context;
        memset(&reset_context, 0, sizeof(reset_context));
|
        reset_context.method = AMD_RESET_METHOD_NONE;
        reset_context.reset_req_dev = adev;
        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
|
        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
    }
}
|
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
|
@ -310,8 +310,16 @@ flr_done:
        adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
        adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
        adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
        adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
        amdgpu_device_gpu_recover(adev, NULL);
        adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
        struct amdgpu_reset_context reset_context;
        memset(&reset_context, 0, sizeof(reset_context));
|
        reset_context.method = AMD_RESET_METHOD_NONE;
        reset_context.reset_req_dev = adev;
        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
|
        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
    }
}
|
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
|
@ -522,8 +522,16 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
    }
|
    /* Trigger recovery due to world switch failure */
    if (amdgpu_device_should_recover_gpu(adev))
        amdgpu_device_gpu_recover(adev, NULL);
    if (amdgpu_device_should_recover_gpu(adev)) {
        struct amdgpu_reset_context reset_context;
        memset(&reset_context, 0, sizeof(reset_context));
|
        reset_context.method = AMD_RESET_METHOD_NONE;
        reset_context.reset_req_dev = adev;
        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
|
        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
    }
}
|
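All three mailbox FLR handlers above switch to the new three-argument amdgpu_device_gpu_recover() and build the same minimal reset context. A standalone C sketch of that pattern follows; the struct, enum, and bit constant are simplified stand-ins for the driver's definitions in amdgpu_reset.h, not the exact kernel types:

#include <string.h>

/* Simplified stand-ins for the driver's definitions in amdgpu_reset.h. */
enum amd_reset_method { AMD_RESET_METHOD_NONE = -1 };
#define AMDGPU_NEED_FULL_RESET 0

struct amdgpu_device; /* opaque here */
struct amdgpu_reset_context {
    enum amd_reset_method method;
    struct amdgpu_device *reset_req_dev;
    unsigned long flags;
};

/* Each call site builds the same minimal context: no forced method, the
 * requesting device, and the full-reset bit cleared so the reset handlers
 * decide the scope themselves. */
static void init_reset_context(struct amdgpu_reset_context *ctx,
                               struct amdgpu_device *adev)
{
    memset(ctx, 0, sizeof(*ctx));
    ctx->method = AMD_RESET_METHOD_NONE;
    ctx->reset_req_dev = adev;
    ctx->flags &= ~(1ul << AMDGPU_NEED_FULL_RESET); /* models clear_bit() */
}

int main(void)
{
    struct amdgpu_reset_context ctx;

    init_reset_context(&ctx, 0);
    return ctx.method == AMD_RESET_METHOD_NONE ? 0 : 1;
}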
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
|
@ -389,34 +389,67 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;
    uint32_t *wptr_saved;
    uint32_t *is_queue_unmap;
    uint64_t aggregated_db_index;
    uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
|
    DRM_DEBUG("Setting write pointer\n");
    if (ring->use_doorbell) {
        DRM_DEBUG("Using doorbell -- "
              "wptr_offs == 0x%08x "
              "lower_32_bits(ring->wptr << 2) == 0x%08x "
              "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
              ring->wptr_offs,
              lower_32_bits(ring->wptr << 2),
              upper_32_bits(ring->wptr << 2));
        /* XXX check if swapping is necessary on BE */
    if (ring->is_mes_queue) {
        wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
        is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
                      sizeof(uint32_t));
        aggregated_db_index =
            amdgpu_mes_get_aggregated_doorbell_index(adev,
            AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
|
        atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                 ring->wptr << 2);
        DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
              ring->doorbell_index, ring->wptr << 2);
        WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        *wptr_saved = ring->wptr << 2;
        if (*is_queue_unmap) {
            WDOORBELL64(aggregated_db_index, ring->wptr << 2);
            DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                  ring->doorbell_index, ring->wptr << 2);
            WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
            DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                  ring->doorbell_index, ring->wptr << 2);
            WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
|
            if (*is_queue_unmap)
                WDOORBELL64(aggregated_db_index,
                        ring->wptr << 2);
        }
    } else {
        DRM_DEBUG("Not using doorbell -- "
              "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
              "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
              ring->me,
              lower_32_bits(ring->wptr << 2),
              ring->me,
              upper_32_bits(ring->wptr << 2));
        WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
            lower_32_bits(ring->wptr << 2));
        WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
            upper_32_bits(ring->wptr << 2));
        if (ring->use_doorbell) {
            DRM_DEBUG("Using doorbell -- "
                  "wptr_offs == 0x%08x "
                  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
                  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
                  ring->wptr_offs,
                  lower_32_bits(ring->wptr << 2),
                  upper_32_bits(ring->wptr << 2));
            /* XXX check if swapping is necessary on BE */
            atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                     ring->wptr << 2);
            DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                  ring->doorbell_index, ring->wptr << 2);
            WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
            DRM_DEBUG("Not using doorbell -- "
                  "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
                  "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
                  ring->me,
                  lower_32_bits(ring->wptr << 2),
                  ring->me,
                  upper_32_bits(ring->wptr << 2));
            WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
                    ring->me, mmSDMA0_GFX_RB_WPTR),
                    lower_32_bits(ring->wptr << 2));
            WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
                    ring->me, mmSDMA0_GFX_RB_WPTR_HI),
                    upper_32_bits(ring->wptr << 2));
        }
    }
}
|
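For MES-mapped SDMA queues, the write pointer is mirrored into a shadow slot placed just behind the MQD, and a queue flagged as unmapped is additionally kicked through the aggregated doorbell. Below is a toy model of that flow; the field names mirror the driver but the layout and doorbell write are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Simplified model: the MQD is followed by a saved-wptr slot and an
 * is_queue_unmap flag, exactly the contract the set_wptr path relies on. */
struct mes_ring_model {
    uint64_t wptr;
    uint32_t wptr_saved;
    uint32_t is_queue_unmap;
    uint32_t doorbell_index;
    uint32_t aggregated_db_index;
};

static void write_doorbell(uint32_t index, uint64_t val)
{
    printf("WDOORBELL64(0x%08x, 0x%016llx)\n", index, (unsigned long long)val);
}

static void ring_set_wptr(struct mes_ring_model *ring)
{
    ring->wptr_saved = (uint32_t)(ring->wptr << 2); /* shadow behind the MQD */
    write_doorbell(ring->doorbell_index, ring->wptr << 2);
    if (ring->is_queue_unmap) /* unmapped queue: also kick the aggregated doorbell */
        write_doorbell(ring->aggregated_db_index, ring->wptr << 2);
}

int main(void)
{
    struct mes_ring_model ring = {
        .wptr = 8, .is_queue_unmap = 1,
        .doorbell_index = 0x20, .aggregated_db_index = 0x40,
    };
    ring_set_wptr(&ring);
    return 0;
}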
@ -57,6 +57,7 @@ static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v6_0_start(struct amdgpu_device *adev);
|
static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
@ -245,34 +246,68 @@ static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;
    uint32_t *wptr_saved;
    uint32_t *is_queue_unmap;
    uint64_t aggregated_db_index;
    uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
|
    DRM_DEBUG("Setting write pointer\n");
    if (ring->use_doorbell) {
        DRM_DEBUG("Using doorbell -- "
              "wptr_offs == 0x%08x "
              "lower_32_bits(ring->wptr) << 2 == 0x%08x "
              "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
              ring->wptr_offs,
              lower_32_bits(ring->wptr << 2),
              upper_32_bits(ring->wptr << 2));
        /* XXX check if swapping is necessary on BE */
|
    if (ring->is_mes_queue) {
        wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
        is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
                      sizeof(uint32_t));
        aggregated_db_index =
            amdgpu_mes_get_aggregated_doorbell_index(adev,
                             ring->hw_prio);
|
        atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                 ring->wptr << 2);
        DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
              ring->doorbell_index, ring->wptr << 2);
        WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        *wptr_saved = ring->wptr << 2;
        if (*is_queue_unmap) {
            WDOORBELL64(aggregated_db_index, ring->wptr << 2);
            DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                  ring->doorbell_index, ring->wptr << 2);
            WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
            DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                  ring->doorbell_index, ring->wptr << 2);
            WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
|
            if (*is_queue_unmap)
                WDOORBELL64(aggregated_db_index,
                        ring->wptr << 2);
        }
    } else {
        DRM_DEBUG("Not using doorbell -- "
              "regSDMA%i_GFX_RB_WPTR == 0x%08x "
              "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
              ring->me,
              lower_32_bits(ring->wptr << 2),
              ring->me,
              upper_32_bits(ring->wptr << 2));
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, ring->me, regSDMA0_QUEUE0_RB_WPTR),
            lower_32_bits(ring->wptr << 2));
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
            upper_32_bits(ring->wptr << 2));
        if (ring->use_doorbell) {
            DRM_DEBUG("Using doorbell -- "
                  "wptr_offs == 0x%08x "
                  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
                  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
                  ring->wptr_offs,
                  lower_32_bits(ring->wptr << 2),
                  upper_32_bits(ring->wptr << 2));
            /* XXX check if swapping is necessary on BE */
            atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                     ring->wptr << 2);
            DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                  ring->doorbell_index, ring->wptr << 2);
            WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
            DRM_DEBUG("Not using doorbell -- "
                  "regSDMA%i_GFX_RB_WPTR == 0x%08x "
                  "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
                  ring->me,
                  lower_32_bits(ring->wptr << 2),
                  ring->me,
                  upper_32_bits(ring->wptr << 2));
            WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
                    ring->me, regSDMA0_QUEUE0_RB_WPTR),
                    lower_32_bits(ring->wptr << 2));
            WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
                    ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
                    upper_32_bits(ring->wptr << 2));
        }
    }
}
|
@ -771,32 +806,54 @@ static int sdma_v6_0_load_microcode(struct amdgpu_device *adev)
static int sdma_v6_0_soft_reset(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    u32 grbm_soft_reset;
    u32 tmp;
    int i;
|
    sdma_v6_0_gfx_stop(adev);
|
    for (i = 0; i < adev->sdma.num_instances; i++) {
        grbm_soft_reset = REG_SET_FIELD(0,
                GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
                1);
        grbm_soft_reset <<= i;
        tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE));
        tmp |= SDMA0_FREEZE__FREEZE_MASK;
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE), tmp);
        tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
        tmp |= SDMA0_F32_CNTL__HALT_MASK;
        tmp |= SDMA0_F32_CNTL__TH1_RESET_MASK;
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), tmp);
|
        tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
        tmp |= grbm_soft_reset;
        DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_PREEMPT), 0);
|
        udelay(100);
|
        tmp = GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK << i;
        WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
        tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
|
        udelay(50);
        udelay(100);
|
        tmp &= ~grbm_soft_reset;
        WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
        WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, 0);
        tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
|
        udelay(50);
        udelay(100);
    }
|
    return 0;
    return sdma_v6_0_start(adev);
}
|
static bool sdma_v6_0_check_soft_reset(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct amdgpu_ring *ring;
    int i, r;
    long tmo = msecs_to_jiffies(1000);
|
    for (i = 0; i < adev->sdma.num_instances; i++) {
        ring = &adev->sdma.instance[i].ring;
        r = amdgpu_ring_test_ib(ring, tmo);
        if (r)
            return true;
    }
|
    return false;
}
|
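The per-instance reset order above — freeze the engine, halt and reset the F32 thread, cancel any pending preemption, then pulse that instance's GRBM soft-reset bit — can be sketched against a toy register file. The masks, register indices, and delays here are placeholders, not the real register encodings:

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for the GC register space. */
static uint32_t regs[4];
enum { REG_FREEZE, REG_F32_CNTL, REG_QUEUE0_PREEMPT, REG_GRBM_SOFT_RESET };

#define FREEZE_MASK      0x1u
#define HALT_MASK        0x1u
#define TH1_RESET_MASK   0x2u
#define SOFT_RESET_SDMA0 0x1u

static void wreg(int r, uint32_t v) { regs[r] = v; }
static uint32_t rreg(int r)         { return regs[r]; }
static void delay_us(unsigned us)   { (void)us; /* udelay() stand-in */ }

/* Freeze, halt, cancel preemption, then pulse the instance's reset bit. */
static void sdma_soft_reset_instance(int i)
{
    wreg(REG_FREEZE, rreg(REG_FREEZE) | FREEZE_MASK);
    wreg(REG_F32_CNTL, rreg(REG_F32_CNTL) | HALT_MASK | TH1_RESET_MASK);
    wreg(REG_QUEUE0_PREEMPT, 0);
    delay_us(100);

    wreg(REG_GRBM_SOFT_RESET, SOFT_RESET_SDMA0 << i); /* assert */
    (void)rreg(REG_GRBM_SOFT_RESET);                  /* read back to post */
    delay_us(100);

    wreg(REG_GRBM_SOFT_RESET, 0);                     /* deassert */
    (void)rreg(REG_GRBM_SOFT_RESET);
    delay_us(100);
}

int main(void)
{
    sdma_soft_reset_instance(0);
    printf("GRBM_SOFT_RESET=0x%08x\n", (unsigned)rreg(REG_GRBM_SOFT_RESET));
    return 0;
}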
/**
@ -830,7 +887,6 @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
        msleep(1000);
    }
|
    sdma_v6_0_soft_reset(adev);
    /* unhalt the MEs */
    sdma_v6_0_enable(adev, true);
    /* enable sdma ring preemption */
@ -1526,6 +1582,7 @@ const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
    .is_idle = sdma_v6_0_is_idle,
    .wait_for_idle = sdma_v6_0_wait_for_idle,
    .soft_reset = sdma_v6_0_soft_reset,
    .check_soft_reset = sdma_v6_0_check_soft_reset,
    .set_clockgating_state = sdma_v6_0_set_clockgating_state,
    .set_powergating_state = sdma_v6_0_set_powergating_state,
    .get_clockgating_state = sdma_v6_0_get_clockgating_state,
|
@ -417,7 +417,13 @@ static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
|
static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
    return true;
    switch (adev->ip_versions[GC_HWIP][0]) {
    case IP_VERSION(11, 0, 0):
    case IP_VERSION(11, 0, 2):
        return false;
    default:
        return true;
    }
}
|
static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
|
@ -184,6 +184,8 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
        /* Navi2x+, Navi1x+ */
        if (gc_version == IP_VERSION(10, 3, 6))
            kfd->device_info.no_atomic_fw_version = 14;
        else if (gc_version == IP_VERSION(10, 3, 7))
            kfd->device_info.no_atomic_fw_version = 3;
        else if (gc_version >= IP_VERSION(10, 3, 0))
            kfd->device_info.no_atomic_fw_version = 92;
        else if (gc_version >= IP_VERSION(10, 1, 1))
|
@ -1674,14 +1674,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
    if (q->properties.is_active) {
        increment_queue_count(dqm, qpd, q);
|
        if (!dqm->dev->shared_resources.enable_mes) {
        if (!dqm->dev->shared_resources.enable_mes)
            retval = execute_queues_cpsch(dqm,
                    KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        } else {
                    KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        else
            retval = add_queue_mes(dqm, q, qpd);
            if (retval)
                goto cleanup_queue;
        }
        if (retval)
            goto cleanup_queue;
    }
|
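After the cleanup above, both mapping paths feed a single error check, so a failed execute_queues_cpsch() now unwinds through the same cleanup label as a failed add_queue_mes(). A minimal sketch of that control flow, with the KFD helpers stubbed out:

#include <stdio.h>

static int execute_queues_cpsch(void) { return 0; } /* stub */
static int add_queue_mes(void)        { return 0; } /* stub */

static int map_new_queue(int enable_mes)
{
    int retval;

    if (!enable_mes)
        retval = execute_queues_cpsch(); /* remap all dynamic queues */
    else
        retval = add_queue_mes();        /* hand the single queue to MES */

    if (retval)
        return retval; /* caller unwinds via cleanup_queue */
    return 0;
}

int main(void)
{
    printf("%d\n", map_new_queue(1));
    return 0;
}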
/*
|
@ -1775,10 +1775,15 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
    pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
         svms, prange->start, prange->last, start, last);
|
    if (!p->xnack_enabled) {
    if (!p->xnack_enabled ||
        (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
        int evicted_ranges;
        bool mapped = prange->mapped_to_gpu;
|
        list_for_each_entry(pchild, &prange->child_list, child_list) {
            if (!pchild->mapped_to_gpu)
                continue;
            mapped = true;
            mutex_lock_nested(&pchild->lock, 1);
            if (pchild->start <= last && pchild->last >= start) {
                pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
@ -1788,6 +1793,9 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
            mutex_unlock(&pchild->lock);
        }
|
        if (!mapped)
            return r;
|
        if (prange->start <= last && prange->last >= start)
            atomic_inc(&prange->invalid);
|
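The new predicate above reads as: the evict-and-restore path is taken when XNACK cannot fault pages back on demand (disabled) or userspace pinned the range with the always-mapped flag, and only if the range or one of its children is actually mapped. A small standalone sketch of that decision; the flag value is illustrative, not the real UAPI constant:

#include <stdbool.h>
#include <stdio.h>

#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x10u /* illustrative value */

/* Decide whether an MMU-notifier eviction must unmap and schedule a
 * restore, mirroring the reworked condition in svm_range_evict(). */
static bool needs_evict_and_restore(bool xnack_enabled, unsigned range_flags,
                                    bool mapped_to_gpu)
{
    if (xnack_enabled && !(range_flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED))
        return false; /* XNACK can fault the pages back on demand */
    return mapped_to_gpu;
}

int main(void)
{
    printf("%d\n", needs_evict_and_restore(true,
           KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED, true));
    return 0;
}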
@ -3343,7 +3351,9 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
        if (r)
            goto out_unlock_range;
|
        if (migrated && !p->xnack_enabled) {
        if (migrated && (!p->xnack_enabled ||
            (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
            prange->mapped_to_gpu) {
            pr_debug("restore_work will update mappings of GPUs\n");
            mutex_unlock(&prange->migrate_mutex);
            continue;
|
@ -72,6 +72,7 @@
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>
|
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
@ -119,6 +120,8 @@ MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
@ -473,6 +476,26 @@ static void dm_pflip_high_irq(void *interrupt_params)
             vrr_active, (int) !e);
}
|
static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
    struct drm_crtc *crtc = &acrtc->base;
    struct drm_device *dev = crtc->dev;
    unsigned long flags;
|
    drm_crtc_handle_vblank(crtc);
|
    spin_lock_irqsave(&dev->event_lock, flags);
|
    /* Send completion event for cursor-only commits */
    if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
        drm_crtc_send_vblank_event(crtc, acrtc->event);
        drm_crtc_vblank_put(crtc);
        acrtc->event = NULL;
    }
|
    spin_unlock_irqrestore(&dev->event_lock, flags);
}
|
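dm_crtc_handle_vblank() above is what fixes timestamps for cursor-only commits: with no page flip in flight, the pending CRTC event is delivered from the vblank interrupt instead of waiting on a flip-done interrupt that never arrives. A toy model of that decision:

#include <stdbool.h>
#include <stdio.h>

/* Minimal model: complete a leftover cursor-only event at vblank time
 * when no page flip has been submitted for this CRTC. */
struct crtc_model {
    bool flip_submitted; /* models pflip_status == AMDGPU_FLIP_SUBMITTED */
    const char *event;   /* models acrtc->event */
};

static void handle_vblank(struct crtc_model *c)
{
    if (c->event && !c->flip_submitted) {
        printf("send vblank event: %s\n", c->event);
        c->event = NULL; /* event consumed; vblank reference dropped */
    }
}

int main(void)
{
    struct crtc_model c = { .flip_submitted = false, .event = "cursor-only commit" };
    handle_vblank(&c);
    return 0;
}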
static void dm_vupdate_high_irq(void *interrupt_params)
{
    struct common_irq_params *irq_params = interrupt_params;
@ -511,7 +534,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
     * if a pageflip happened inside front-porch.
     */
    if (vrr_active) {
        drm_crtc_handle_vblank(&acrtc->base);
        dm_crtc_handle_vblank(acrtc);
|
        /* BTR processing for pre-DCE12 ASICs */
        if (acrtc->dm_irq_params.stream &&
@ -563,7 +586,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
     * to dm_vupdate_high_irq after end of front-porch.
     */
    if (!vrr_active)
        drm_crtc_handle_vblank(&acrtc->base);
        dm_crtc_handle_vblank(acrtc);
|
    /**
     * Following stuff must happen at start of vblank, for crc
@ -1403,6 +1426,41 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
    return false;
}
|
static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
    {
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
            DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
        },
    },
    {
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
            DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
        },
    },
    {
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
            DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
        },
    },
    {}
};
|
static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
    const struct dmi_system_id *dmi_id;
|
    dm->aux_hpd_discon_quirk = false;
|
    dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
    if (dmi_id) {
        dm->aux_hpd_discon_quirk = true;
        DRM_INFO("aux_hpd_discon_quirk attached\n");
    }
}
|
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
    struct dc_init_data init_data;
@ -1530,7 +1588,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
    init_data.flags.enable_mipi_converter_optimization = true;
|
    init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
    init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
|
    INIT_LIST_HEAD(&adev->dm.da_list);
|
    retrieve_dmi_info(&adev->dm);
|
    /* Display Core create. */
    adev->dm.dc = dc_create(&init_data);
|
@ -1562,6 +1626,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
    if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
        adev->dm.dc->debug.disable_clock_gate = true;
|
    if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
        adev->dm.dc->debug.force_subvp_mclk_switch = true;
|
    r = dm_dmub_hw_init(adev);
    if (r) {
        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@ -1617,7 +1684,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
    if (dc_enable_dmub_notifications(adev->dm.dc)) {
    if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
        init_completion(&adev->dm.dmub_aux_transfer_done);
        adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
        if (!adev->dm.dmub_notify) {
@ -1653,6 +1720,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        goto error;
    }
|
    /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
     * It is expected that DMUB will resend any pending notifications at this point, for
     * example HPD from DPIA.
     */
    if (dc_is_dmub_outbox_supported(adev->dm.dc))
        dc_enable_dmub_outbox(adev->dm.dc);
|
    /* create fake encoders for MST */
    dm_dp_create_fake_mst_encoders(adev);
|
@ -1941,6 +2015,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
        fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
        break;
    case IP_VERSION(3, 1, 4):
        dmub_asic = DMUB_ASIC_DCN314;
        fw_name_dmub = FIRMWARE_DCN_314_DMUB;
        break;
    case IP_VERSION(3, 1, 5):
        dmub_asic = DMUB_ASIC_DCN315;
        fw_name_dmub = FIRMWARE_DCN_315_DMUB;
@ -2625,9 +2703,6 @@ static int dm_resume(void *handle)
         */
        link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
|
        if (dc_enable_dmub_notifications(adev->dm.dc))
            amdgpu_dm_outbox_init(adev);
|
        r = dm_dmub_hw_init(adev);
        if (r)
            DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@ -2645,6 +2720,11 @@ static int dm_resume(void *handle)
            }
        }
|
        if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
            amdgpu_dm_outbox_init(adev);
            dc_enable_dmub_outbox(adev->dm.dc);
        }
|
        WARN_ON(!dc_commit_state(dm->dc, dc_state));
|
        dm_gpureset_commit_state(dm->cached_dc_state, dm);
@ -2666,13 +2746,15 @@ static int dm_resume(void *handle)
    /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
    dc_resource_state_construct(dm->dc, dm_state->context);
|
    /* Re-enable outbox interrupts for DPIA. */
    if (dc_enable_dmub_notifications(adev->dm.dc))
        amdgpu_dm_outbox_init(adev);
|
    /* Before powering on DC we need to re-initialize DMUB. */
    dm_dmub_hw_resume(adev);
|
    /* Re-enable outbox interrupts for DPIA. */
    if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
        amdgpu_dm_outbox_init(adev);
        dc_enable_dmub_outbox(adev->dm.dc);
    }
|
    /* power on hardware */
    dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
|
@ -2705,10 +2787,13 @@ static int dm_resume(void *handle)
        if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
            DRM_ERROR("KMS: Failed to detect connector\n");
|
        if (aconnector->base.force && new_connection_type == dc_connection_none)
        if (aconnector->base.force && new_connection_type == dc_connection_none) {
            emulated_link_detect(aconnector->dc_link);
        else
        } else {
            mutex_lock(&dm->dc_lock);
            dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
            mutex_unlock(&dm->dc_lock);
        }
|
        if (aconnector->fake_enable && aconnector->dc_link->local_sink)
            aconnector->fake_enable = false;
@ -3039,6 +3124,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
#ifdef CONFIG_DRM_AMD_DC_HDCP
    struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif
    bool ret = false;
|
    if (adev->dm.disable_hpd_irq)
        return;
@ -3070,16 +3156,20 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
|
        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
            drm_kms_helper_connector_hotplug_event(connector);
    } else {
        mutex_lock(&adev->dm.dc_lock);
        ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
        mutex_unlock(&adev->dm.dc_lock);
        if (ret) {
            amdgpu_dm_update_connector_after_detect(aconnector);
|
    } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
        amdgpu_dm_update_connector_after_detect(aconnector);
            drm_modeset_lock_all(dev);
            dm_restore_drm_connector_state(dev, connector);
            drm_modeset_unlock_all(dev);
|
        drm_modeset_lock_all(dev);
        dm_restore_drm_connector_state(dev, connector);
        drm_modeset_unlock_all(dev);
|
        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
            drm_kms_helper_connector_hotplug_event(connector);
            if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                drm_kms_helper_connector_hotplug_event(connector);
        }
    }
    mutex_unlock(&aconnector->hpd_lock);
|
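A recurring pattern in this series: the HPD paths now take adev->dm.dc_lock around dc_link_detect() and branch on the saved result, rather than calling detection inside the conditional with no lock held. A sketch of that locking shape, using a pthread mutex as a stand-in for the driver's mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dc_lock = PTHREAD_MUTEX_INITIALIZER; /* models adev->dm.dc_lock */

static bool dc_link_detect_stub(void) { return true; } /* stub for dc_link_detect() */

/* Detect under the lock, then act on the saved result outside it. */
static void handle_hpd(void)
{
    bool ret;

    pthread_mutex_lock(&dc_lock);
    ret = dc_link_detect_stub();
    pthread_mutex_unlock(&dc_lock);

    if (ret)
        printf("update connector state, restore DRM state, send hotplug event\n");
}

int main(void)
{
    handle_hpd();
    return 0;
}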
@ -3274,19 +3364,25 @@ out:
        drm_modeset_unlock_all(dev);
|
        drm_kms_helper_connector_hotplug_event(connector);
    } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
    } else {
        bool ret = false;
|
        if (aconnector->fake_enable)
            aconnector->fake_enable = false;
        mutex_lock(&adev->dm.dc_lock);
        ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
        mutex_unlock(&adev->dm.dc_lock);
|
        amdgpu_dm_update_connector_after_detect(aconnector);
        if (ret) {
            if (aconnector->fake_enable)
                aconnector->fake_enable = false;
|
            amdgpu_dm_update_connector_after_detect(aconnector);
|
        drm_modeset_lock_all(dev);
        dm_restore_drm_connector_state(dev, connector);
        drm_modeset_unlock_all(dev);
            drm_modeset_lock_all(dev);
            dm_restore_drm_connector_state(dev, connector);
            drm_modeset_unlock_all(dev);
|
        drm_kms_helper_connector_hotplug_event(connector);
            drm_kms_helper_connector_hotplug_event(connector);
        }
    }
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
@ -3820,7 +3916,8 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
    adev_to_drm(adev)->mode_config.max_height = 16384;
|
    adev_to_drm(adev)->mode_config.preferred_depth = 24;
    adev_to_drm(adev)->mode_config.prefer_shadow = 1;
    /* disable prefer shadow for now due to hibernation issues */
    adev_to_drm(adev)->mode_config.prefer_shadow = 0;
    /* indicates support for immediate flip */
    adev_to_drm(adev)->mode_config.async_page_flip = true;
|
@ -4221,6 +4318,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
    case IP_VERSION(3, 0, 0):
    case IP_VERSION(3, 1, 2):
    case IP_VERSION(3, 1, 3):
    case IP_VERSION(3, 1, 4):
    case IP_VERSION(3, 1, 5):
    case IP_VERSION(3, 1, 6):
    case IP_VERSION(3, 2, 0):
@ -4241,6 +4339,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
    switch (adev->ip_versions[DCE_HWIP][0]) {
    case IP_VERSION(3, 1, 2):
    case IP_VERSION(3, 1, 3):
    case IP_VERSION(3, 1, 4):
    case IP_VERSION(3, 1, 5):
    case IP_VERSION(3, 1, 6):
    case IP_VERSION(3, 2, 0):
@ -4290,23 +4389,30 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        if (aconnector->base.force && new_connection_type == dc_connection_none) {
            emulated_link_detect(link);
            amdgpu_dm_update_connector_after_detect(aconnector);
        } else {
            bool ret = false;
|
        } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
            amdgpu_dm_update_connector_after_detect(aconnector);
            register_backlight_device(dm, link);
            if (dm->num_of_edps)
                update_connector_ext_caps(aconnector);
            if (psr_feature_enabled)
                amdgpu_dm_set_psr_caps(link);
            mutex_lock(&dm->dc_lock);
            ret = dc_link_detect(link, DETECT_REASON_BOOT);
            mutex_unlock(&dm->dc_lock);
|
            /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
             * PSR is also supported.
             */
            if (link->psr_settings.psr_feature_enabled)
                adev_to_drm(adev)->vblank_disable_immediate = false;
            if (ret) {
                amdgpu_dm_update_connector_after_detect(aconnector);
                register_backlight_device(dm, link);
|
                if (dm->num_of_edps)
                    update_connector_ext_caps(aconnector);
|
                if (psr_feature_enabled)
                    amdgpu_dm_set_psr_caps(link);
|
                /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
                 * PSR is also supported.
                 */
                if (link->psr_settings.psr_feature_enabled)
                    adev_to_drm(adev)->vblank_disable_immediate = false;
            }
        }
|
    }
|
    /* Software is initialized. Now we can register interrupt handlers. */
@ -4357,6 +4463,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
    case IP_VERSION(3, 0, 1):
    case IP_VERSION(3, 1, 2):
    case IP_VERSION(3, 1, 3):
    case IP_VERSION(3, 1, 4):
    case IP_VERSION(3, 1, 5):
    case IP_VERSION(3, 1, 6):
    case IP_VERSION(3, 2, 0):
@ -4545,6 +4652,7 @@ static int dm_early_init(void *handle)
    case IP_VERSION(2, 1, 0):
    case IP_VERSION(3, 1, 2):
    case IP_VERSION(3, 1, 3):
    case IP_VERSION(3, 1, 4):
    case IP_VERSION(3, 1, 5):
    case IP_VERSION(3, 1, 6):
    case IP_VERSION(3, 2, 0):
@ -5295,6 +5403,7 @@ get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_
        add_gfx10_1_modifiers(adev, mods, &size, &capacity);
        break;
    case AMDGPU_FAMILY_GC_11_0_0:
    case AMDGPU_FAMILY_GC_11_0_2:
        add_gfx11_modifiers(adev, mods, &size, &capacity);
        break;
    }
@ -5474,7 +5583,7 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
        }
    }
|
    if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
    if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
        *pre_multiplied_alpha = false;
}
|
@ -7213,12 +7322,10 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        break;
    }
|
    if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
    dc_result = dc_validate_stream(adev->dm.dc, stream);
    if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
        dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
|
    if (dc_result == DC_OK)
        dc_result = dc_validate_stream(adev->dm.dc, stream);
|
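The reorder above means every stream is DC-validated first, and the MST port-bandwidth check runs only for MST signals on a stream that already passed basic validation. A short sketch with both checks stubbed:

#include <stdbool.h>
#include <stdio.h>

enum dc_status { DC_OK = 1, DC_FAIL_BANDWIDTH_VALIDATE = 2 };

static enum dc_status dc_validate_stream_stub(void)     { return DC_OK; }
static enum dc_status mst_port_supports_mode_stub(void) { return DC_OK; }

/* Validate first; run the MST port check only on an already-valid stream. */
static enum dc_status validate(bool is_mst)
{
    enum dc_status res = dc_validate_stream_stub();

    if (res == DC_OK && is_mst)
        res = mst_port_supports_mode_stub();
    return res;
}

int main(void)
{
    printf("%d\n", validate(true));
    return 0;
}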
    if (dc_result != DC_OK) {
        DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
                  drm_mode->hdisplay,
@ -8651,7 +8758,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
|
    if (dc_submit_i2c(
            ddc_service->ctx->dc,
            ddc_service->ddc_pin->hw_info.ddc_channel,
            ddc_service->link->link_index,
            &cmd))
        result = num;
|
@ -8687,8 +8794,6 @@ create_i2c(struct ddc_service *ddc_service,
    snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
    i2c_set_adapdata(&i2c->base, i2c);
    i2c->ddc_service = ddc_service;
    if (i2c->ddc_service->ddc_pin)
        i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
|
    return i2c;
}
@ -9316,6 +9421,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
    unsigned long flags;
    uint32_t target_vblank, last_flip_vblank;
    bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
    bool cursor_update = false;
    bool pflip_present = false;
    struct {
        struct dc_surface_update surface_updates[MAX_SURFACES];
@ -9351,8 +9457,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
|
        /* Cursor plane is handled after stream updates */
        if (plane->type == DRM_PLANE_TYPE_CURSOR)
        if (plane->type == DRM_PLANE_TYPE_CURSOR) {
            if ((fb && crtc == pcrtc) ||
                (old_plane_state->fb && old_plane_state->crtc == pcrtc))
                cursor_update = true;
|
            continue;
        }
|
        if (!fb || !crtc || pcrtc != crtc)
            continue;
@ -9505,6 +9616,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
            bundle->stream_update.vrr_infopacket =
                &acrtc_state->stream->vrr_infopacket;
        }
    } else if (cursor_update && acrtc_state->active_planes > 0 &&
           acrtc_attach->base.state->event) {
        drm_crtc_vblank_get(pcrtc);
|
        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
|
        acrtc_attach->event = acrtc_attach->base.state->event;
        acrtc_attach->base.state->event = NULL;
|
        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
    }
|
    /* Update the planes if changed or disable if we don't have any. */
|
@ -547,6 +547,14 @@ struct amdgpu_display_manager {
     * last successfully applied backlight values.
     */
    u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
|
    /**
     * @aux_hpd_discon_quirk:
     *
     * Quirk for an HPD disconnect while an AUX transaction is on-going,
     * observed on certain Intel platforms.
     */
    bool aux_hpd_discon_quirk;
};
|
enum dsc_clock_force_state {
|
@ -1226,12 +1226,14 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
    struct drm_connector *connector = &aconnector->base;
    struct dc_link *link = NULL;
    struct drm_device *dev = connector->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    enum dc_connection_type new_connection_type = dc_connection_none;
    char *wr_buf = NULL;
    uint32_t wr_buf_size = 42;
    int max_param_num = 1;
    long param[1] = {0};
    uint8_t param_nums = 0;
    bool ret = false;
|
    if (!aconnector || !aconnector->dc_link)
        return -EINVAL;
@ -1267,7 +1269,11 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
        new_connection_type != dc_connection_none)
        goto unlock;
|
    if (!dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD))
    mutex_lock(&adev->dm.dc_lock);
    ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
    mutex_unlock(&adev->dm.dc_lock);
|
    if (!ret)
        goto unlock;
|
    amdgpu_dm_update_connector_after_detect(aconnector);
@ -3290,7 +3296,10 @@ static int trigger_hpd_mst_set(void *data, u64 val)
        aconnector = to_amdgpu_dm_connector(connector);
        if (aconnector->dc_link->type == dc_connection_mst_branch &&
            aconnector->mst_mgr.aux) {
            mutex_lock(&adev->dm.dc_lock);
            dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
            mutex_unlock(&adev->dm.dc_lock);
|
            drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
        }
    }
|
@ -667,6 +667,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
|
        hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
        if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
            dc->ctx->dce_version == DCN_VERSION_3_14 ||
            dc->ctx->dce_version == DCN_VERSION_3_15 ||
            dc->ctx->dce_version == DCN_VERSION_3_16)
            hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
|
@ -56,6 +56,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
    ssize_t result = 0;
    struct aux_payload payload;
    enum aux_return_code_type operation_result;
    struct amdgpu_device *adev;
    struct ddc_service *ddc;
|
    if (WARN_ON(msg->size > 16))
        return -E2BIG;
@ -74,6 +76,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
    result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
                      &operation_result);
|
    /*
     * w/a for certain intel platforms where HPD unexpectedly pulls low during
     * the 1st sideband message transaction and AUX_RET_ERROR_HPD_DISCON is
     * returned; the aux transaction is actually successful in such a case,
     * therefore bypass the error
     */
    ddc = TO_DM_AUX(aux)->ddc_service;
    adev = ddc->ctx->driver_context;
    if (adev->dm.aux_hpd_discon_quirk) {
        if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
            operation_result == AUX_RET_ERROR_HPD_DISCON) {
            result = 0;
            operation_result = AUX_RET_SUCCESS;
        }
    }
|
    if (payload.write && result >= 0)
        result = msg->size;
|
@ -160,6 +177,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
|
        dc_sink_release(dc_sink);
        aconnector->dc_sink = NULL;
        aconnector->edid = NULL;
    }
    drm_modeset_unlock(&root->mst_mgr.base.lock);
}
@ -411,6 +429,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
|
        dc_sink_release(aconnector->dc_sink);
        aconnector->dc_sink = NULL;
        aconnector->edid = NULL;
    }
|
    return connection_status;
|
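The quirk above rewrites exactly one failure mode: a spurious HPD-disconnect error on the first MST sideband write that the hardware actually completed. A standalone sketch of that filter; the AUX error value is illustrative, while 0x1000 is the DPCD DOWN_REQ sideband base address per the DP specification:

#include <stdbool.h>
#include <stdio.h>

#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* DPCD address, per the DP spec */

enum aux_ret { AUX_RET_SUCCESS = 0, AUX_RET_ERROR_HPD_DISCON = -5 /* illustrative */ };

/* Rewrite the one known-spurious error to success; pass everything
 * else through untouched. */
static int filter_aux_result(bool quirk_active, unsigned address,
                             enum aux_ret *op_result, int result)
{
    if (quirk_active &&
        address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
        *op_result == AUX_RET_ERROR_HPD_DISCON) {
        *op_result = AUX_RET_SUCCESS;
        return 0; /* treated as a zero-byte success, like the driver */
    }
    return result;
}

int main(void)
{
    enum aux_ret op = AUX_RET_ERROR_HPD_DISCON;
    int res = filter_aux_result(true, DP_SIDEBAND_MSG_DOWN_REQ_BASE, &op, -1);

    printf("result=%d op=%d\n", res, op);
    return 0;
}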
@ -21,7 +21,6 @@
#
#
# Makefile for Display Core (dc) component.
#
|
DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual
|
@ -36,6 +35,7 @@ DC_LIBS += dcn301
DC_LIBS += dcn302
DC_LIBS += dcn303
DC_LIBS += dcn31
DC_LIBS += dcn314
DC_LIBS += dcn315
DC_LIBS += dcn316
DC_LIBS += dcn32
|
@ -23,8 +23,6 @@
 *
 */
|
#include <linux/slab.h>
|
#include "dm_services.h"
#include "include/vector.h"
|
@ -23,8 +23,6 @@
 *
 */
|
#include <linux/slab.h>
|
#include "dm_services.h"
|
#include "ObjectID.h"
@ -404,7 +402,7 @@ static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(
        return NULL;
    }
|
    return NULL;
    return NULL;
}
|
static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
@ -607,8 +605,8 @@ static enum bp_result bios_parser_get_hpd_info(
    default:
        object = get_bios_object(bp, id);
|
        if (!object)
            return BP_RESULT_BADINPUT;
        if (!object)
            return BP_RESULT_BADINPUT;
|
        record = get_hpd_record(bp, object);
|
@ -812,10 +810,10 @@ static enum bp_result bios_parser_get_device_tag(
    /* getBiosObject will return MXM object */
    object = get_bios_object(bp, connector_object_id);
|
    if (!object) {
        BREAK_TO_DEBUGGER(); /* Invalid object id */
        return BP_RESULT_BADINPUT;
    }
    if (!object) {
        BREAK_TO_DEBUGGER(); /* Invalid object id */
        return BP_RESULT_BADINPUT;
    }
|
    info->acpi_device = 0; /* BIOS no longer provides this */
    info->dev_id = device_type_from_device_id(object->device_tag);
@ -1598,7 +1596,7 @@ static bool bios_parser_is_device_id_supported(
        break;
    }
|
    return false;
    return false;
}
|
static uint32_t bios_parser_get_ss_entry_number(
@ -2081,6 +2079,7 @@ static enum bp_result bios_parser_get_encoder_cap_info(
    record = get_encoder_cap_record(bp, object);
    if (!record)
        return BP_RESULT_NORECORD;
    DC_LOG_BIOS("record->encodercaps 0x%x for object_id 0x%x", record->encodercaps, object_id.id);
|
    info->DP_HBR2_CAP = (record->encodercaps &
            ATOM_ENCODER_CAP_RECORD_HBR2) ? 1 : 0;
@ -2100,6 +2099,7 @@ static enum bp_result bios_parser_get_encoder_cap_info(
            ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0;
    info->DP_IS_USB_C = (record->encodercaps &
            ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;
    DC_LOG_BIOS("\t info->DP_IS_USB_C %d", info->DP_IS_USB_C);
|
    return BP_RESULT_OK;
}
@ -2946,7 +2946,35 @@ static enum bp_result construct_integrated_info(
|
    if (result != BP_RESULT_OK)
        return result;
    else {
        // Log each external path
        for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
            if (info->ext_disp_conn_info.path[i].device_tag != 0)
                DC_LOG_BIOS("integrated_info:For EXTERNAL DISPLAY PATH %d --------------\n"
                        "DEVICE_TAG: 0x%x\n"
                        "DEVICE_ACPI_ENUM: 0x%x\n"
                        "DEVICE_CONNECTOR_ID: 0x%x\n"
                        "EXT_AUX_DDC_LUT_INDEX: %d\n"
                        "EXT_HPD_PIN_LUT_INDEX: %d\n"
                        "EXT_ENCODER_OBJ_ID: 0x%x\n"
                        "Encoder CAPS: 0x%x\n",
                        i,
                        info->ext_disp_conn_info.path[i].device_tag,
                        info->ext_disp_conn_info.path[i].device_acpi_enum,
                        info->ext_disp_conn_info.path[i].device_connector_id.id,
                        info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index,
                        info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index,
                        info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
                        info->ext_disp_conn_info.path[i].caps
                        );
        }
|
        // Log the Checksum and Voltage Swing
        DC_LOG_BIOS("Integrated info table CHECKSUM: %d\n"
                "Integrated info table FIX_DP_VOLTAGE_SWING: %d\n",
                info->ext_disp_conn_info.checksum,
                info->ext_disp_conn_info.fixdpvoltageswing);
    }
    /* Sort voltage table from low to high*/
    for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
        for (j = i; j > 0; --j) {
|
@ -75,6 +75,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
    case DCN_VERSION_3_02:
    case DCN_VERSION_3_03:
    case DCN_VERSION_3_1:
    case DCN_VERSION_3_14:
    case DCN_VERSION_3_15:
    case DCN_VERSION_3_16:
    case DCN_VERSION_3_2:
|
@ -154,6 +154,15 @@ AMD_DAL_CLK_MGR_DCN31 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn31/,$(CLK_MGR_DC
|
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN31)
|
###############################################################################
# DCN314
###############################################################################
CLK_MGR_DCN314 = dcn314_smu.o dcn314_clk_mgr.o
|
AMD_DAL_CLK_MGR_DCN314 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn314/,$(CLK_MGR_DCN314))
|
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN314)
|
###############################################################################
# DCN315
###############################################################################
|
@ -43,11 +43,11 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn301/vg_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "dcn314/dcn314_clk_mgr.h"
#include "dcn315/dcn315_clk_mgr.h"
#include "dcn316/dcn316_clk_mgr.h"
#include "dcn32/dcn32_clk_mgr.h"
|
int clk_mgr_helper_get_active_display_cnt(
        struct dc *dc,
        struct dc_state *context)
@ -58,6 +58,12 @@ int clk_mgr_helper_get_active_display_cnt(
    for (i = 0; i < context->stream_count; i++) {
        const struct dc_stream_state *stream = context->streams[i];
|
        /* Don't count SubVP phantom pipes as part of active
         * display count
         */
        if (stream->mall_stream_config.type == SUBVP_PHANTOM)
            continue;
|
        /*
         * Only notify active stream or virtual stream.
         * Need to notify virtual stream to work around
@ -281,6 +287,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
        return &clk_mgr->base.base;
    }
    break;
|
    case FAMILY_YELLOW_CARP: {
        struct clk_mgr_dcn31 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
|
@ -329,6 +336,20 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
        return &clk_mgr->base;
        break;
    }
|
    case AMDGPU_FAMILY_GC_11_0_2: {
        struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
|
        if (clk_mgr == NULL) {
            BREAK_TO_DEBUGGER();
            return NULL;
        }
|
        dcn314_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
        return &clk_mgr->base.base;
    }
    break;
|
#endif
    default:
        ASSERT(0); /* Unknown Asic */
@ -375,6 +396,11 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
    case AMDGPU_FAMILY_GC_11_0_0:
        dcn32_clk_mgr_destroy(clk_mgr);
        break;
|
    case AMDGPU_FAMILY_GC_11_0_2:
        dcn314_clk_mgr_destroy(clk_mgr);
        break;
|
    default:
        break;
    }
|
@@ -101,9 +101,9 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
	uint32_t result;

	result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
	ASSERT(result == VBIOSSMC_Result_OK);

	smu_print("SMU response after wait: %d\n", result);
	if (result != VBIOSSMC_Result_OK)
		smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);

	if (result == VBIOSSMC_Status_BUSY) {
		return -1;
@@ -188,6 +188,10 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
			khz_to_mhz_ceil(requested_dcfclk_khz));

#ifdef DBG
	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif

	return actual_dcfclk_set_mhz * 1000;
}
@@ -102,7 +102,8 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,

	result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);

	smu_print("SMU response after wait: %d\n", result);
	if (result != VBIOSSMC_Result_OK)
		smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);

	if (result == VBIOSSMC_Status_BUSY) {
		return -1;
@@ -179,6 +180,10 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
			khz_to_mhz_ceil(requested_dcfclk_khz));

#ifdef DBG
	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif

	return actual_dcfclk_set_mhz * 1000;
}
@@ -108,9 +108,9 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
	uint32_t result;

	result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
	ASSERT(result == VBIOSSMC_Result_OK);

	smu_print("SMU response after wait: %d\n", result);
	if (result != VBIOSSMC_Result_OK)
		smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);

	if (result == VBIOSSMC_Status_BUSY) {
		return -1;
@@ -202,6 +202,10 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste
			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
			khz_to_mhz_ceil(requested_dcfclk_khz));

#ifdef DBG
	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif

	return actual_dcfclk_set_mhz * 1000;
}
751	drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c	Normal file
@@ -0,0 +1,751 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dcn314_clk_mgr.h"

#include "dccg.h"
#include "clk_mgr_internal.h"

// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"

// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"

#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"

/* TODO: remove this include once we have ported over the remaining clk mgr functions */
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"

#include "dc_dmub_srv.h"
#include "dc_link_dp.h"
#include "dcn314_smu.h"

#define MAX_INSTANCE 7
#define MAX_SEGMENT 8

struct IP_BASE_INSTANCE {
	unsigned int segment[MAX_SEGMENT];
};

struct IP_BASE {
	struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};

static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } },
					{ { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } },
					{ { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } },
					{ { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } },
					{ { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } },
					{ { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } },
					{ { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } };

#define regCLK1_CLK_PLL_REQ			0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX		0

#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT	0x0
#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT	0xc
#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT	0x10
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK	0x000001FFL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK	0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK	0xFFFF0000L

#define REG(reg_name) \
	(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)

#define TO_CLK_MGR_DCN314(clk_mgr) \
	container_of(clk_mgr, struct clk_mgr_dcn314, base)

static int dcn314_get_active_display_cnt_wa(
		struct dc *dc,
		struct dc_state *context)
{
	int i, display_count;
	bool tmds_present = false;

	display_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
			tmds_present = true;
	}

	for (i = 0; i < dc->link_count; i++) {
		const struct dc_link *link = dc->links[i];

		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
		if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			display_count++;
	}

	/* WA for hang on HDMI after display is turned off, then back on */
	if (display_count == 0 && tmds_present)
		display_count = 1;

	return display_count;
}

static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
{
	struct dc *dc = clk_mgr_base->ctx->dc;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->top_pipe || pipe->prev_odm_pipe)
			continue;
		if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
				dc_is_virtual_signal(pipe->stream->signal))) {
			if (disable)
				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
			else
				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
		}
	}
}
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
		struct dc_state *context,
		bool safe_to_lower)
{
	union dmub_rb_cmd cmd;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool dpp_clock_lowered = false;

	if (dc->work_arounds.skip_clock_update)
		return;

	/*
	 * If it is safe to lower but we are already in the lower state, we don't have to do anything.
	 * Also, if safe_to_lower is false, we just go to the higher state.
	 */
	if (safe_to_lower) {
		if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
			dcn314_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
		}

		if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
			dcn314_smu_set_dtbclk(clk_mgr, false);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}
		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
			display_count = dcn314_get_active_display_cnt_wa(dc, context);
			/* if we can go lower, go lower */
			if (display_count == 0) {
				union display_idle_optimization_u idle_info = { 0 };
				idle_info.idle_info.df_request_disabled = 1;
				idle_info.idle_info.phy_ref_clk_off = 1;
				idle_info.idle_info.s0i2_rdy = 1;
				dcn314_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
				/* update power state */
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
			}
		}
	} else {
		if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
			dcn314_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
		}

		if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
			dcn314_smu_set_dtbclk(clk_mgr, true);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}

		/* check that we're not already in D0 */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
			union display_idle_optimization_u idle_info = { 0 };

			dcn314_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
		}
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn314_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn314_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	// workaround: floor dppclk at 100 MHz to avoid underflow when switching from a low-clock eDP panel to a 4K monitor.
	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		if (new_clocks->dppclk_khz < 100000)
			new_clocks->dppclk_khz = 100000;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		dcn314_disable_otg_wa(clk_mgr_base, true);

		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
		dcn314_disable_otg_wa(clk_mgr_base, false);

		update_dispclk = true;
	}

	if (dpp_clock_lowered) {
		// increase per DPP DTO before lowering global dppclk
		dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
		dcn314_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	} else {
		// increase global DPPCLK before lowering per DPP DTO
		if (update_dppclk || update_dispclk)
			dcn314_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
		// always update dtos unless clock is lowered and not safe to lower
		if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
	}

	// notify DMCUB of latest clocks
	memset(&cmd, 0, sizeof(cmd));
	cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
	cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
	cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
	cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
		clk_mgr_base->clks.dcfclk_deep_sleep_khz;
	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;

	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
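For reference, every programming step in dcn314_update_clocks() above is gated on should_set_clock(), which is defined elsewhere in DC rather than in this patch. A minimal sketch of its usual semantics (raise immediately, lower only when the caller says lowering is safe):

	static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
	{
		/* program when the clock must rise, or may fall and lowering is allowed */
		return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
	}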
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	/* get FbMult value */
	struct fixed31_32 pll_req;
	unsigned int fbmult_frac_val = 0;
	unsigned int fbmult_int_val = 0;

	/*
	 * Register value of fbmult is in 8.16 format, we are converting to 31.32
	 * to leverage the fixed-point operations available in the driver
	 */

	REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16-bit fractional part */
	REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8-bit integer part */

	pll_req = dc_fixpt_from_int(fbmult_int_val);

	/*
	 * since the fractional part is only 16 bit in the register definition but is 32 bit
	 * in our fixed-point definition, we need to shift left by 16 to obtain the correct value
	 */
	pll_req.value |= fbmult_frac_val << 16;

	/* multiply by REFCLK period */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);

	/* integer part is now VCO frequency in kHz */
	return dc_fixpt_floor(pll_req);
}
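To make the fixed-point conversion above concrete, here is a worked example with invented register values (illustrative only, not read from hardware):

	/* Assume FbMult_int = 100, FbMult_frac = 0x8000 and dfs_ref_freq_khz = 48000.
	 *
	 * pll_req = dc_fixpt_from_int(100);            // 100.0 in fixed31_32
	 * pll_req.value |= 0x8000 << 16;               // fraction: 0x8000 / 0x10000 = 0.5
	 * pll_req = dc_fixpt_mul_int(pll_req, 48000);  // 100.5 * 48000
	 * dc_fixpt_floor(pll_req);                     // 4824000 kHz VCO frequency
	 */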
static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	dcn314_smu_enable_pme_wa(clk_mgr);
}

void dcn314_init_clocks(struct clk_mgr *clk_mgr)
{
	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
	// Assumption is that boot state always supports pstate
	clk_mgr->clks.p_state_change_support = true;
	clk_mgr->clks.prev_p_state_change_support = true;
	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
	clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}

bool dcn314_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;
	else if (a->zstate_support != b->zstate_support)
		return false;
	else if (a->dtbclk_en != b->dtbclk_en)
		return false;

	return true;
}

static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
		struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
	return;
}

static struct clk_bw_params dcn314_bw_params = {
	.vram_type = Ddr4MemType,
	.num_channels = 1,
	.clk_table = {
		.num_entries = 4,
	},
};

static struct wm_table ddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.valid = true,
		},
	}
};

static struct wm_table lpddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
	}
};

static DpmClocks_t dummy_clocks;

static struct dcn314_watermarks dummy_wms = { 0 };

static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
{
	int i, num_valid_sets;

	num_valid_sets = 0;

	for (i = 0; i < WM_SET_COUNT; i++) {
		/* skip empty entries, the smu array has no holes */
		if (!bw_params->wm_table.entries[i].valid)
			continue;

		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
		/* We will not select WM based on fclk, so leave it as unconstrained */
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

		if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
			if (i == 0)
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
			else {
				/* add 1 to make it non-overlapping with next lvl */
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
						bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
			}
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
					bw_params->clk_table.entries[i].dcfclk_mhz;

		} else {
			/* unconstrained for memory retraining */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

			/* Modify previous watermark range to cover up to max */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
		}
		num_valid_sets++;
	}

	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */

	/* modify the min and max to make sure we cover the whole range */
	table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;

	/* This is for writeback only; it does not matter currently, as there is no writeback support */
	table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
	table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
	table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}
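A worked example of the range construction above, using hypothetical DCFCLK levels (the numbers are invented for illustration):

	/* Suppose clk_table holds dcfclk_mhz = { 400, 600, 800 }. For
	 * WM_TYPE_PSTATE_CHG entries the loop then emits:
	 *   set 0: MinMclk = 0,   MaxMclk = 400
	 *   set 1: MinMclk = 401, MaxMclk = 600   // prev level + 1, no overlap
	 *   set 2: MinMclk = 601, MaxMclk = 800
	 * and the final fixup widens the last set to MaxMclk = 0xFFFF so the
	 * whole range is covered.
	 */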
static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct clk_mgr_dcn314 *clk_mgr_dcn314 = TO_CLK_MGR_DCN314(clk_mgr);
	struct dcn314_watermarks *table = clk_mgr_dcn314->smu_wm_set.wm_set;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || clk_mgr_dcn314->smu_wm_set.mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn314_build_watermark_ranges(clk_mgr_base->bw_params, table);

	dcn314_smu_set_dram_addr_high(clk_mgr,
			clk_mgr_dcn314->smu_wm_set.mc_address.high_part);
	dcn314_smu_set_dram_addr_low(clk_mgr,
			clk_mgr_dcn314->smu_wm_set.mc_address.low_part);
	dcn314_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}

static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct dcn314_smu_dpm_clks *smu_dpm_clks)
{
	DpmClocks_t *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || smu_dpm_clks->mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn314_smu_set_dram_addr_high(clk_mgr,
			smu_dpm_clks->mc_address.high_part);
	dcn314_smu_set_dram_addr_low(clk_mgr,
			smu_dpm_clks->mc_address.low_part);
	dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}

static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t max = 0;
	int i;

	for (i = 0; i < num_clocks; ++i) {
		if (clocks[i] > max)
			max = clocks[i];
	}

	return max;
}

static unsigned int find_clk_for_voltage(
		const DpmClocks_t *clock_table,
		const uint32_t clocks[],
		unsigned int voltage)
{
	int i;
	int max_voltage = 0;
	int clock = 0;

	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
		if (clock_table->SocVoltage[i] == voltage) {
			return clocks[i];
		} else if (clock_table->SocVoltage[i] >= max_voltage &&
				clock_table->SocVoltage[i] < voltage) {
			max_voltage = clock_table->SocVoltage[i];
			clock = clocks[i];
		}
	}

	ASSERT(clock);
	return clock;
}

static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
		struct integrated_info *bios_info,
		const DpmClocks_t *clock_table)
{
	int i, j;
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
	uint32_t max_dispclk = 0, max_dppclk = 0;

	j = -1;

	ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);

	/* Find lowest DPM; FCLK is filled in reverse order */

	for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
		if (clock_table->DfPstateTable[i].FClk != 0) {
			j = i;
			break;
		}
	}

	if (j == -1) {
		/* clock table is all 0s, just use our own hardcoded defaults */
		ASSERT(0);
		return;
	}

	bw_params->clk_table.num_entries = j + 1;

	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
			clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
		max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
		max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
	} else {
		ASSERT(0);
	}

	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
		switch (clock_table->DfPstateTable[j].WckRatio) {
		case WCK_RATIO_1_2:
			bw_params->clk_table.entries[i].wck_ratio = 2;
			break;
		case WCK_RATIO_1_4:
			bw_params->clk_table.entries[i].wck_ratio = 4;
			break;
		default:
			bw_params->clk_table.entries[i].wck_ratio = 1;
		}
		bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
	}

	bw_params->vram_type = bios_info->memory_type;
	bw_params->num_channels = bios_info->ma_channel_number;

	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}

		bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}
}

static struct clk_mgr_funcs dcn314_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
	.update_clocks = dcn314_update_clocks,
	.init_clocks = dcn314_init_clocks,
	.enable_pme_wa = dcn314_enable_pme_wa,
	.are_clock_states_equal = dcn314_are_clock_states_equal,
	.notify_wm_ranges = dcn314_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;

void dcn314_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_dcn314 *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct dcn314_smu_dpm_clks smu_dpm_clks = { 0 };

	clk_mgr->base.base.ctx = ctx;
	clk_mgr->base.base.funcs = &dcn314_funcs;

	clk_mgr->base.pp_smu = pp_smu;

	clk_mgr->base.dccg = dccg;
	clk_mgr->base.dfs_bypass_disp_clk = 0;

	clk_mgr->base.dprefclk_ss_percentage = 0;
	clk_mgr->base.dprefclk_ss_divider = 1000;
	clk_mgr->base.ss_on_dprefclk = false;
	clk_mgr->base.dfs_ref_freq_khz = 48000;

	clk_mgr->smu_wm_set.wm_set = (struct dcn314_watermarks *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(struct dcn314_watermarks),
				&clk_mgr->smu_wm_set.mc_address.quad_part);

	if (!clk_mgr->smu_wm_set.wm_set) {
		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
	}
	ASSERT(clk_mgr->smu_wm_set.wm_set);

	smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(DpmClocks_t),
				&smu_dpm_clks.mc_address.quad_part);

	if (smu_dpm_clks.dpm_clks == NULL) {
		smu_dpm_clks.dpm_clks = &dummy_clocks;
		smu_dpm_clks.mc_address.quad_part = 0;
	}

	ASSERT(smu_dpm_clks.dpm_clks);

	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
		clk_mgr->base.base.funcs = &dcn3_fpga_funcs;
	} else {
		struct clk_log_info log_info = {0};

		clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base);

		if (clk_mgr->base.smu_ver)
			clk_mgr->base.smu_present = true;

		/* TODO: Check we get what we expect during bringup */
		clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);

		if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
			dcn314_bw_params.wm_table = lpddr5_wm_table;
		else
			dcn314_bw_params.wm_table = ddr5_wm_table;

		/* Saved clocks configured at boot for debug purposes */
		dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
	}

	clk_mgr->base.base.dprefclk_khz = 600000;
	clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
	dce_clock_read_ss_info(&clk_mgr->base);
	/* if bios enabled SS, the driver needs to adjust the dtb clock; only enable with a correct bios */
	//clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);

	clk_mgr->base.base.bw_params = &dcn314_bw_params;

	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
		dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);

		if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
			dcn314_clk_mgr_helper_populate_bw_params(
					&clk_mgr->base,
					ctx->dc_bios->integrated_info,
					smu_dpm_clks.dpm_clks);
		}
	}

	if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				smu_dpm_clks.dpm_clks);
}

void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
	struct clk_mgr_dcn314 *clk_mgr = TO_CLK_MGR_DCN314(clk_mgr_int);

	if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				clk_mgr->smu_wm_set.wm_set);
}
@@ -0,0 +1,57 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __DCN314_CLK_MGR_H__
#define __DCN314_CLK_MGR_H__
#include "clk_mgr_internal.h"

struct dcn314_watermarks;

struct dcn314_smu_watermark_set {
	struct dcn314_watermarks *wm_set;
	union large_integer mc_address;
};

struct clk_mgr_dcn314 {
	struct clk_mgr_internal base;
	struct dcn314_smu_watermark_set smu_wm_set;
};

bool dcn314_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b);
void dcn314_init_clocks(struct clk_mgr *clk_mgr);
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
		struct dc_state *context,
		bool safe_to_lower);

void dcn314_clk_mgr_construct(struct dc_context *ctx,
		struct clk_mgr_dcn314 *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg);

void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);

#endif //__DCN314_CLK_MGR_H__
391	drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c	Normal file
@@ -0,0 +1,391 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dm_helpers.h"
#include "dcn314_smu.h"

#include "mp/mp_13_0_5_offset.h"

/* TODO: Use the real headers when they're correct */
#define MP1_BASE__INST0_SEG0	0x00016000
#define MP1_BASE__INST0_SEG1	0x0243FC00
#define MP1_BASE__INST0_SEG2	0x00DC0000
#define MP1_BASE__INST0_SEG3	0x00E00000
#define MP1_BASE__INST0_SEG4	0x00E40000
#define MP1_BASE__INST0_SEG5	0

#ifdef BASE_INNER
#undef BASE_INNER
#endif

#define BASE_INNER(seg) MP1_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

#define REG(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)

#define FN(reg_name, field) \
	FD(reg_name##__##field)

#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
	CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }

#define VBIOSSMC_MSG_TestMessage		0x1
#define VBIOSSMC_MSG_GetSmuVersion		0x2
#define VBIOSSMC_MSG_PowerUpGfx		0x3
#define VBIOSSMC_MSG_SetDispclkFreq		0x4
#define VBIOSSMC_MSG_SetDprefclkFreq		0x5 //Not used. DPRef is constant
#define VBIOSSMC_MSG_SetDppclkFreq		0x6
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq	0x7
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk	0x8
#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq	0x9 //Keep it in case VMIN does not support phy clk
#define VBIOSSMC_MSG_GetFclkFrequency		0xA
#define VBIOSSMC_MSG_SetDisplayCount		0xB //Not used anymore
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown	0xC //Not used anymore
#define VBIOSSMC_MSG_UpdatePmeRestore		0xD
#define VBIOSSMC_MSG_SetVbiosDramAddrHigh	0xE //Used for WM table txfr
#define VBIOSSMC_MSG_SetVbiosDramAddrLow	0xF
#define VBIOSSMC_MSG_TransferTableSmu2Dram	0x10
#define VBIOSSMC_MSG_TransferTableDram2Smu	0x11
#define VBIOSSMC_MSG_SetDisplayIdleOptimizations	0x12
#define VBIOSSMC_MSG_GetDprefclkFreq		0x13
#define VBIOSSMC_MSG_GetDtbclkFreq		0x14
#define VBIOSSMC_MSG_AllowZstatesEntry		0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry	0x16
#define VBIOSSMC_MSG_SetDtbClk			0x17
#define VBIOSSMC_Message_Count			0x18

#define VBIOSSMC_Status_BUSY			0x0
#define VBIOSSMC_Result_OK			0x1
#define VBIOSSMC_Result_Failed			0xFF
#define VBIOSSMC_Result_UnknownCmd		0xFE
#define VBIOSSMC_Result_CmdRejectedPrereq	0xFD
#define VBIOSSMC_Result_CmdRejectedBusy		0xFC

/*
 * Function to be used instead of REG_WAIT macro because the wait ends when
 * the register is NOT EQUAL to zero, and because the translation in msg_if.h
 * won't work with REG_WAIT.
 */
static uint32_t dcn314_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
	uint32_t res_val = VBIOSSMC_Status_BUSY;

	do {
		res_val = REG_READ(MP1_SMN_C2PMSG_91);
		if (res_val != VBIOSSMC_Status_BUSY)
			break;

		if (delay_us >= 1000)
			msleep(delay_us/1000);
		else if (delay_us > 0)
			udelay(delay_us);
	} while (max_retries--);

	return res_val;
}
static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
					  unsigned int msg_id,
					  unsigned int param)
{
	uint32_t result;

	result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000);
	ASSERT(result == VBIOSSMC_Result_OK);

	smu_print("SMU response after wait: %d\n", result);

	if (result == VBIOSSMC_Status_BUSY)
		return -1;

	/* First clear response register */
	REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);

	/* Set the parameter register for the SMU message; unit is MHz */
	REG_WRITE(MP1_SMN_C2PMSG_83, param);

	/* Trigger the message transaction by writing the message ID */
	REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);

	result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000);

	if (result == VBIOSSMC_Result_Failed) {
		if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
		    param == TABLE_WATERMARKS)
			DC_LOG_WARNING("Watermarks table not configured properly by SMU");
		else
			ASSERT(0);
		REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
		return -1;
	}

	if (IS_SMU_TIMEOUT(result)) {
		ASSERT(0);
		dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
	}

	return REG_READ(MP1_SMN_C2PMSG_83);
}
int dcn314_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
	return dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_GetSmuVersion,
			0);
}

int dcn314_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
	int actual_dispclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return requested_dispclk_khz;

	/* Unit of SMU msg parameter is MHz */
	actual_dispclk_set_mhz = dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDispclkFreq,
			khz_to_mhz_ceil(requested_dispclk_khz));

	return actual_dispclk_set_mhz * 1000;
}

int dcn314_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
	int actual_dprefclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return clk_mgr->base.dprefclk_khz;

	actual_dprefclk_set_mhz = dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDprefclkFreq,
			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));

	/* TODO: add code for programming the DP DTO; currently this is done by the command table */

	return actual_dprefclk_set_mhz * 1000;
}

int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
	int actual_dcfclk_set_mhz = -1;

	if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
		return -1;

	if (!clk_mgr->smu_present)
		return requested_dcfclk_khz;

	actual_dcfclk_set_mhz = dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
			khz_to_mhz_ceil(requested_dcfclk_khz));

	return actual_dcfclk_set_mhz * 1000;
}

int dcn314_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
	int actual_min_ds_dcfclk_mhz = -1;

	if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
		return -1;

	if (!clk_mgr->smu_present)
		return requested_min_ds_dcfclk_khz;

	actual_min_ds_dcfclk_mhz = dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
			khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));

	return actual_min_ds_dcfclk_mhz * 1000;
}

int dcn314_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
	int actual_dppclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return requested_dpp_khz;

	actual_dppclk_set_mhz = dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDppclkFreq,
			khz_to_mhz_ceil(requested_dpp_khz));

	return actual_dppclk_set_mhz * 1000;
}

void dcn314_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
{
	if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
		return;

	if (!clk_mgr->smu_present)
		return;

	// TODO: Work with the SMU team to define optimization options.
	dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDisplayIdleOptimizations,
			idle_info);
}

void dcn314_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
	union display_idle_optimization_u idle_info = { 0 };

	if (!clk_mgr->smu_present)
		return;

	if (enable) {
		idle_info.idle_info.df_request_disabled = 1;
		idle_info.idle_info.phy_ref_clk_off = 1;
	}

	dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDisplayIdleOptimizations,
			idle_info.data);
}

void dcn314_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_UpdatePmeRestore,
			0);
}

void dcn314_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
	if (!clk_mgr->smu_present)
		return;

	dcn314_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}

void dcn314_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
	if (!clk_mgr->smu_present)
		return;

	dcn314_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}

void dcn314_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn314_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
}

void dcn314_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn314_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}

void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
{
	unsigned int msg_id, param;

	if (!clk_mgr->smu_present)
		return;

	if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
			(support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
		support = DCN_ZSTATE_SUPPORT_DISALLOW;

	// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
	// Arg[16] = Disallow Z9 -> new bit
	switch (support) {

	case DCN_ZSTATE_SUPPORT_ALLOW:
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = 9;
		break;

	case DCN_ZSTATE_SUPPORT_DISALLOW:
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = 8;
		break;

	case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = 0x00010008;
		break;

	default: //DCN_ZSTATE_SUPPORT_UNKNOWN
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = 0;
		break;
	}

	dcn314_smu_send_msg_with_param(
			clk_mgr,
			msg_id,
			param);
}
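A mechanical decode of the ALLOW_Z10_ONLY parameter above, following the bit layout stated in the function's own comments:

	/* param = 0x00010008
	 *   Arg[15:0] = 0x0008 -> the existing "8" (Z8) code
	 *   Arg[16]   = 1      -> the new "Disallow Z9" bit
	 */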
/* Arg = 1: turn DTB clock on; 0: turn DTB clock off. When on, it runs at 600 MHz. */
void dcn314_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
	if (!clk_mgr->smu_present)
		return;

	dcn314_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDtbClk,
			enable);
}
79	drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h	Normal file
@@ -0,0 +1,79 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef DAL_DC_314_SMU_H_
#define DAL_DC_314_SMU_H_

#include "smu13_driver_if_v13_0_4.h"

typedef enum {
	WCK_RATIO_1_1 = 0,  // DDR5, Wck:ck is always 1:1;
	WCK_RATIO_1_2,
	WCK_RATIO_1_4,
	WCK_RATIO_MAX
} WCK_RATIO_e;

struct dcn314_watermarks {
	// Watermarks
	WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
	uint32_t MmHubPadding[7]; // SMU internal use
};

struct dcn314_smu_dpm_clks {
	DpmClocks_t *dpm_clks;
	union large_integer mc_address;
};

struct display_idle_optimization {
	unsigned int df_request_disabled : 1;
	unsigned int phy_ref_clk_off : 1;
	unsigned int s0i2_rdy : 1;
	unsigned int reserved : 29;
};

union display_idle_optimization_u {
	struct display_idle_optimization idle_info;
	uint32_t data;
};

int dcn314_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
int dcn314_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
int dcn314_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
int dcn314_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
int dcn314_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
void dcn314_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info);
void dcn314_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
void dcn314_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
void dcn314_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
void dcn314_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
void dcn314_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr);
void dcn314_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);

void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support);
void dcn314_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);

#endif /* DAL_DC_314_SMU_H_ */
@@ -173,11 +173,14 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		dcn315_disable_otg_wa(clk_mgr_base, true);
		/* No need to apply the w/a if we haven't taken over from bios yet */
		if (clk_mgr_base->clks.dispclk_khz)
			dcn315_disable_otg_wa(clk_mgr_base, true);

		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
		dcn315_disable_otg_wa(clk_mgr_base, false);
		if (clk_mgr_base->clks.dispclk_khz)
			dcn315_disable_otg_wa(clk_mgr_base, false);

		update_dispclk = true;
	}
@@ -136,9 +136,9 @@ static int dcn315_smu_send_msg_with_param(
	uint32_t result;

	result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
	ASSERT(result == VBIOSSMC_Result_OK);

	smu_print("SMU response after wait: %d\n", result);
	if (result != VBIOSSMC_Result_OK)
		smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);

	if (result == VBIOSSMC_Status_BUSY) {
		return -1;
@@ -205,6 +205,10 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
			khz_to_mhz_ceil(requested_dcfclk_khz));

#ifdef DBG
	smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
#endif

	return actual_dcfclk_set_mhz * 1000;
}
|
@ -124,9 +124,9 @@ static int dcn316_smu_send_msg_with_param(
|
||||
uint32_t result;
|
||||
|
||||
result = dcn316_smu_wait_for_response(clk_mgr, 10, 200000);
|
||||
ASSERT(result == VBIOSSMC_Result_OK);
|
||||
|
||||
smu_print("SMU response after wait: %d\n", result);
|
||||
if (result != VBIOSSMC_Result_OK)
|
||||
smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
|
||||
|
||||
if (result == VBIOSSMC_Status_BUSY) {
|
||||
return -1;
|
||||
@ -191,6 +191,10 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
|
||||
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
|
||||
khz_to_mhz_ceil(requested_dcfclk_khz));
|
||||
|
||||
#ifdef DBG
|
||||
smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
|
||||
#endif
|
||||
|
||||
return actual_dcfclk_set_mhz * 1000;
|
||||
}
|
||||
|
||||
|
@@ -323,6 +323,8 @@ static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
		if (pipe_ctx->stream_res.audio != NULL)
			dto_params.req_audio_dtbclk_khz = 24000;
	}
	if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
		dto_params.is_hdmi = true;

	dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
	//dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
@@ -100,9 +100,10 @@ void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool e

void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways)
{
	smu_print("Numways for SubVP : %d\n", num_ways);
	uint32_t param = (num_ways << 1) | (num_ways > 0);

	dcn32_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetCabForUclkPstate, num_ways, NULL);
	dcn32_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetCabForUclkPstate, param, NULL);
	smu_print("Numways for SubVP : %d\n", num_ways);
}

void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
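The fix above stops passing the raw way count to the SMU and instead encodes the argument as (num_ways << 1) | (num_ways > 0), i.e. the way count in the upper bits with bit 0 as an enable flag. For example:

	/* num_ways = 0 -> param = 0x0 (CAB disabled)
	 * num_ways = 4 -> param = 0x9 (4 << 1 = 8, plus the enable bit)
	 */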
@ -22,9 +22,6 @@
|
||||
* Authors: AMD
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include "dm_services.h"
|
||||
|
||||
#include "dc.h"
|
||||
@ -347,10 +344,16 @@ static bool create_link_encoders(struct dc *dc)
|
||||
*/
|
||||
static void destroy_link_encoders(struct dc *dc)
|
||||
{
|
||||
unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
|
||||
unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
|
||||
unsigned int num_usb4_dpia;
|
||||
unsigned int num_dig_link_enc;
|
||||
int i;
|
||||
|
||||
if (!dc->res_pool)
|
||||
return;
|
||||
|
||||
num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
|
||||
num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
|
||||
|
||||
/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
|
||||
* link encoders and physical display endpoints and does not require
|
||||
* additional link encoder objects.
|
||||
@ -859,6 +862,8 @@ static bool dc_construct_ctx(struct dc *dc,
|
||||
dc_ctx->dc_sink_id_count = 0;
|
||||
dc_ctx->dc_stream_id_count = 0;
|
||||
dc_ctx->dce_environment = init_params->dce_environment;
|
||||
dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
|
||||
dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
|
||||
|
||||
/* Create logger */
|
||||
|
||||
@ -1082,6 +1087,16 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
                dc->current_state->stream_count != context->stream_count)
            should_disable = true;

        if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
            struct pipe_ctx *old_pipe, *new_pipe;

            old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
            new_pipe = &context->res_ctx.pipe_ctx[i];

            if (old_pipe->plane_state && !new_pipe->plane_state)
                should_disable = true;
        }

        if (should_disable && old_stream) {
            dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
            disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@ -1228,6 +1243,9 @@ struct dc *dc_create(const struct dc_init_data *init_params)
        dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
    }

    dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
    dc->nbio_reg_offsets = init_params->nbio_reg_offsets;

    /* Populate versioning information */
    dc->versions.dc_ver = DC_VER;
@ -1333,7 +1351,9 @@ static void program_timing_sync(
    struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

    for (i = 0; i < pipe_count; i++) {
        if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
        if (!ctx->res_ctx.pipe_ctx[i].stream
                || ctx->res_ctx.pipe_ctx[i].top_pipe
                || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
            continue;

        unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
@ -1742,8 +1762,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c

    result = dc->hwss.apply_ctx_to_hw(dc, context);

    if (result != DC_OK)
    if (result != DC_OK) {
        /* Application of dc_state to hardware stopped. */
        dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
        return result;
    }

    dc_trigger_sync(dc, context);
@ -1905,7 +1928,8 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
    for (i = 0; i < MAX_PIPES; i++) {
        pipe = &context->res_ctx.pipe_ctx[i];

        if (!pipe->plane_state)
        // Don't check flip pending on phantom pipes
        if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
            continue;

        /* Must set to false to start with, due to OR in update function */
@ -2406,6 +2430,96 @@ static enum surface_update_type check_update_surfaces_for_stream(
    return overall_type;
}

static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
{
    int view_height, view_width, clip_x, clip_y, clip_width, clip_height;

    view_height = src.height;
    view_width = src.width;

    clip_x = clip_rect.x;
    clip_y = clip_rect.y;

    clip_width = clip_rect.width;
    clip_height = clip_rect.height;

    /* check for centered video accounting for off by 1 scaling truncation */
    if ((view_height - clip_y - clip_height <= clip_y + 1) &&
            (view_width - clip_x - clip_width <= clip_x + 1) &&
            (view_height - clip_y - clip_height >= clip_y - 1) &&
            (view_width - clip_x - clip_width >= clip_x - 1)) {

        /* when OS scales up/down to letter box, it may end up
         * with few blank pixels on the border due to truncating.
         * Add offset margin to account for this
         */
        if (clip_x <= 4 || clip_y <= 4)
            return true;
    }

    return false;
}

static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
        struct dc_surface_update *srf_updates, int surface_count,
        enum surface_update_type update_type)
{
    enum surface_update_type new_update_type = update_type;
    int i, j;
    struct pipe_ctx *pipe = NULL;
    struct dc_stream_state *stream;

    /* Check that we are in windowed MPO with ODM
     * - look for MPO pipe by scanning pipes for first pipe matching
     *   surface that has moved ( position change )
     * - MPO pipe will have top pipe
     * - check that top pipe has ODM pointer
     */
    if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
        for (i = 0; i < surface_count; i++) {
            if (srf_updates[i].surface && srf_updates[i].scaling_info
                    && srf_updates[i].surface->update_flags.bits.position_change) {

                for (j = 0; j < dc->res_pool->pipe_count; j++) {
                    if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
                        pipe = &dc->current_state->res_ctx.pipe_ctx[j];
                        stream = pipe->stream;
                        break;
                    }
                }

                if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
                        && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
                    struct rect old_clip_rect, new_clip_rect;
                    bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
                    bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;

                    old_clip_rect = srf_updates[i].surface->clip_rect;
                    new_clip_rect = srf_updates[i].scaling_info->clip_rect;

                    old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
                    old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
                    old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;

                    new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
                    new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
                    new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;

                    if (old_clip_rect_left && new_clip_rect_middle)
                        new_update_type = UPDATE_TYPE_FULL;
                    else if (old_clip_rect_middle && new_clip_rect_right)
                        new_update_type = UPDATE_TYPE_FULL;
                    else if (old_clip_rect_right && new_clip_rect_middle)
                        new_update_type = UPDATE_TYPE_FULL;
                    else if (old_clip_rect_middle && new_clip_rect_left)
                        new_update_type = UPDATE_TYPE_FULL;
                }
            }
        }
    }
    return new_update_type;
}
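dc_check_is_fullscreen_video() treats a clip as centered when the bottom margin matches the top margin and the right margin matches the left margin, each to within one pixel, and then allows a thin border for letterbox truncation. A standalone worked check of those inequalities (the rect layout mirrors the fields used above; this is a sketch, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct rect { int x, y, width, height; };

/* Same predicate as the hunk above, lifted out for a worked example. */
static bool is_fullscreen_video_sketch(struct rect src, struct rect clip)
{
    int bottom_margin = src.height - clip.y - clip.height;
    int right_margin  = src.width  - clip.x - clip.width;

    if (bottom_margin <= clip.y + 1 && bottom_margin >= clip.y - 1 &&
        right_margin  <= clip.x + 1 && right_margin  >= clip.x - 1)
        return clip.x <= 4 || clip.y <= 4;  /* allow a thin truncation border */

    return false;
}

int main(void)
{
    struct rect src  = { 0, 0, 3840, 2160 };
    struct rect clip = { 0, 3, 3840, 2154 };  /* 3px letterbox top and bottom */

    /* bottom margin = 2160 - 3 - 2154 = 3 == clip.y, x margins both 0: centered */
    printf("%d\n", is_fullscreen_video_sketch(src, clip));  /* prints 1 */
    return 0;
}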
/*
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *

@ -2437,6 +2551,10 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
            updates[i].surface->update_flags.raw = 0xFFFFFFFF;
    }

    if (type == UPDATE_TYPE_MED)
        type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
                updates, surface_count, type);

    if (type == UPDATE_TYPE_FAST) {
        // If there's an available clock comparator, we use that.
        if (dc->clk_mgr->funcs->are_clock_states_equal) {
@ -2716,6 +2834,137 @@ static void copy_stream_update_to_stream(struct dc *dc,
    }
}

void dc_reset_state(struct dc *dc, struct dc_state *context)
{
    dc_resource_state_destruct(context);

    /* clear the structure, but don't reset the reference count */
    memset(context, 0, offsetof(struct dc_state, refcount));

    init_state(dc, context);
}

static bool update_planes_and_stream_state(struct dc *dc,
        struct dc_surface_update *srf_updates, int surface_count,
        struct dc_stream_state *stream,
        struct dc_stream_update *stream_update,
        enum surface_update_type *new_update_type,
        struct dc_state **new_context)
{
    struct dc_state *context;
    int i, j;
    enum surface_update_type update_type;
    const struct dc_stream_status *stream_status;
    struct dc_context *dc_ctx = dc->ctx;

    stream_status = dc_stream_get_status(stream);

    if (!stream_status) {
        if (surface_count) /* Only an error condition if surf_count non-zero*/
            ASSERT(false);

        return false; /* Cannot commit surface to stream that is not committed */
    }

    context = dc->current_state;

    update_type = dc_check_update_surfaces_for_stream(
            dc, srf_updates, surface_count, stream_update, stream_status);

    /* update current stream with the new updates */
    copy_stream_update_to_stream(dc, context, stream, stream_update);

    /* do not perform surface update if surface has invalid dimensions
     * (all zero) and no scaling_info is provided
     */
    if (surface_count > 0) {
        for (i = 0; i < surface_count; i++) {
            if ((srf_updates[i].surface->src_rect.width == 0 ||
                    srf_updates[i].surface->src_rect.height == 0 ||
                    srf_updates[i].surface->dst_rect.width == 0 ||
                    srf_updates[i].surface->dst_rect.height == 0) &&
                    (!srf_updates[i].scaling_info ||
                    srf_updates[i].scaling_info->src_rect.width == 0 ||
                    srf_updates[i].scaling_info->src_rect.height == 0 ||
                    srf_updates[i].scaling_info->dst_rect.width == 0 ||
                    srf_updates[i].scaling_info->dst_rect.height == 0)) {
                DC_ERROR("Invalid src/dst rects in surface update!\n");
                return false;
            }
        }
    }

    if (update_type >= update_surface_trace_level)
        update_surface_trace(dc, srf_updates, surface_count);

    if (update_type >= UPDATE_TYPE_FULL) {
        struct dc_plane_state *new_planes[MAX_SURFACES] = {0};

        for (i = 0; i < surface_count; i++)
            new_planes[i] = srf_updates[i].surface;

        /* initialize scratch memory for building context */
        context = dc_create_state(dc);
        if (context == NULL) {
            DC_ERROR("Failed to allocate new validate context!\n");
            return false;
        }

        dc_resource_state_copy_construct(
                dc->current_state, context);

        /* remove old surfaces from context */
        if (!dc_rem_all_planes_for_stream(dc, stream, context)) {

            BREAK_TO_DEBUGGER();
            goto fail;
        }

        /* add surface to context */
        if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {

            BREAK_TO_DEBUGGER();
            goto fail;
        }
    }

    /* save update parameters into surface */
    for (i = 0; i < surface_count; i++) {
        struct dc_plane_state *surface = srf_updates[i].surface;

        copy_surface_update_to_plane(surface, &srf_updates[i]);

        if (update_type >= UPDATE_TYPE_MED) {
            for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

                if (pipe_ctx->plane_state != surface)
                    continue;

                resource_build_scaling_params(pipe_ctx);
            }
        }
    }

    if (update_type == UPDATE_TYPE_FULL) {
        if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
            BREAK_TO_DEBUGGER();
            goto fail;
        }
    }

    *new_context = context;
    *new_update_type = update_type;

    return true;

fail:
    dc_release_state(context);

    return false;

}
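For a full update the helper above rebuilds a candidate state rather than mutating the live one: copy the current state, remove the stream's old planes, add the new set, then validate bandwidth before publishing. The control flow, condensed into a sketch that reuses the DC calls named above (the composition, not the exact function, is what this illustrates):

/* Sketch only; error paths beyond the release are elided. */
static bool rebuild_context_sketch(struct dc *dc, struct dc_stream_state *stream,
        struct dc_plane_state **new_planes, int count,
        struct dc_state **out_ctx)
{
    struct dc_state *ctx = dc_create_state(dc);

    if (!ctx)
        return false;

    dc_resource_state_copy_construct(dc->current_state, ctx);

    /* Swap the plane set atomically from the caller's point of view. */
    if (!dc_rem_all_planes_for_stream(dc, stream, ctx) ||
        !dc_add_all_planes_for_stream(dc, stream, new_planes, count, ctx) ||
        !dc->res_pool->funcs->validate_bandwidth(dc, ctx, false)) {
        dc_release_state(ctx);  /* never publish an invalid context */
        return false;
    }

    *out_ctx = ctx;
    return true;
}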
static void commit_planes_do_stream_update(struct dc *dc,
        struct dc_stream_state *stream,
        struct dc_stream_update *stream_update,

@ -2917,6 +3166,13 @@ static void commit_planes_for_stream(struct dc *dc,
    int i, j;
    struct pipe_ctx *top_pipe_to_program = NULL;
    bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
    bool subvp_prev_use = false;

    // Once we apply the new subvp context to hardware it won't be in the
    // dc->current_state anymore, so we have to cache it before we apply
    // the new SubVP context
    subvp_prev_use = false;


    dc_z10_restore(dc);

@ -2955,6 +3211,15 @@ static void commit_planes_for_stream(struct dc *dc,
        }
    }

    for (i = 0; i < dc->res_pool->pipe_count; i++) {
        struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

        // Check old context for SubVP
        subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
        if (subvp_prev_use)
            break;
    }

    if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
        struct pipe_ctx *mpcc_pipe;
        struct pipe_ctx *odm_pipe;
@ -2964,7 +3229,7 @@ static void commit_planes_for_stream(struct dc *dc,
            odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
    }

    if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
    if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
        if (top_pipe_to_program &&
                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
            if (should_use_dmub_lock(stream->link)) {

@ -2982,10 +3247,16 @@ static void commit_planes_for_stream(struct dc *dc,
                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
                        top_pipe_to_program->stream_res.tg);
        }
    }

    if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
        if (dc->hwss.subvp_pipe_control_lock)
            dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
        dc->hwss.interdependent_update_lock(dc, context, true);

    } else {
        if (dc->hwss.subvp_pipe_control_lock)
            dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
        /* Lock the top pipe while updating plane addrs, since freesync requires
         * plane addr update event triggers to be synchronized.
         * top_pipe_to_program is expected to never be NULL

@ -2993,8 +3264,40 @@ static void commit_planes_for_stream(struct dc *dc,
        dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
    }

    if (update_type != UPDATE_TYPE_FAST) {
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
            struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];

            if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
                    subvp_prev_use) {
                // If old context or new context has phantom pipes, apply
                // the phantom timings now. We can't change the phantom
                // pipe configuration safely without driver acquiring
                // the DMCUB lock first.
                dc->hwss.apply_ctx_to_hw(dc, context);
                break;
            }
        }
    }

    dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);

    if (update_type != UPDATE_TYPE_FAST) {
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
            struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];

            if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
                    subvp_prev_use) {
                // If old context or new context has phantom pipes, apply
                // the phantom timings now. We can't change the phantom
                // pipe configuration safely without driver acquiring
                // the DMCUB lock first.
                dc->hwss.apply_ctx_to_hw(dc, context);
                break;
            }
        }
    }

    // Stream updates
    if (stream_update)
        commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@ -3009,12 +3312,27 @@ static void commit_planes_for_stream(struct dc *dc,
        if (dc->hwss.program_front_end_for_ctx)
            dc->hwss.program_front_end_for_ctx(dc, context);

        if (update_type != UPDATE_TYPE_FAST)
            if (dc->hwss.commit_subvp_config)
                dc->hwss.commit_subvp_config(dc, context);

        if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
            dc->hwss.interdependent_update_lock(dc, context, false);
        } else {
            dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
        }
        dc->hwss.post_unlock_program_front_end(dc, context);

        /* Since phantom pipe programming is moved to post_unlock_program_front_end,
         * move the SubVP lock to after the phantom pipes have been setup
         */
        if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
            if (dc->hwss.subvp_pipe_control_lock)
                dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
        } else {
            if (dc->hwss.subvp_pipe_control_lock)
                dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
        }
        return;
    }
@ -3138,27 +3456,27 @@ static void commit_planes_for_stream(struct dc *dc,

    }

#ifdef CONFIG_DRM_AMD_DC_DCN
    if (update_type != UPDATE_TYPE_FAST)
        if (dc->hwss.commit_subvp_config)
            dc->hwss.commit_subvp_config(dc, context);
#endif
    if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
        dc->hwss.interdependent_update_lock(dc, context, false);
    else
        dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
    if (update_type != UPDATE_TYPE_FAST)
        if (dc->hwss.commit_subvp_config)
            dc->hwss.commit_subvp_config(dc, context);

    if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
    if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
        dc->hwss.interdependent_update_lock(dc, context, false);
    } else {
        dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
    }

    if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
        if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
            top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
                top_pipe_to_program->stream_res.tg,
                CRTC_STATE_VACTIVE);
                    top_pipe_to_program->stream_res.tg,
                    CRTC_STATE_VACTIVE);
            top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
                top_pipe_to_program->stream_res.tg,
                CRTC_STATE_VBLANK);
                    top_pipe_to_program->stream_res.tg,
                    CRTC_STATE_VBLANK);
            top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
                top_pipe_to_program->stream_res.tg,
                CRTC_STATE_VACTIVE);
                    top_pipe_to_program->stream_res.tg,
                    CRTC_STATE_VACTIVE);

            if (stream && should_use_dmub_lock(stream->link)) {
                union dmub_hw_lock_flags hw_locks = { 0 };
@ -3175,10 +3493,23 @@ static void commit_planes_for_stream(struct dc *dc,
                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
                        top_pipe_to_program->stream_res.tg);
        }
    }

    if (update_type != UPDATE_TYPE_FAST)
    if (update_type != UPDATE_TYPE_FAST) {
        dc->hwss.post_unlock_program_front_end(dc, context);

        /* Since phantom pipe programming is moved to post_unlock_program_front_end,
         * move the SubVP lock to after the phantom pipes have been setup
         */
        if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
            if (dc->hwss.subvp_pipe_control_lock)
                dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
        } else {
            if (dc->hwss.subvp_pipe_control_lock)
                dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
        }
    }

    // Fire manual trigger only when bottom plane is flipped
    for (j = 0; j < dc->res_pool->pipe_count; j++) {
        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@ -3197,6 +3528,152 @@ static void commit_planes_for_stream(struct dc *dc,
    }
}

static bool commit_minimal_transition_state(struct dc *dc,
        struct dc_state *transition_base_context)
{
    struct dc_state *transition_context = dc_create_state(dc);
    enum pipe_split_policy tmp_policy;
    enum dc_status ret = DC_ERROR_UNEXPECTED;
    unsigned int i, j;

    if (!transition_context)
        return false;

    tmp_policy = dc->debug.pipe_split_policy;
    dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;

    dc_resource_state_copy_construct(transition_base_context, transition_context);

    //commit minimal state
    if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
        for (i = 0; i < transition_context->stream_count; i++) {
            struct dc_stream_status *stream_status = &transition_context->stream_status[i];

            for (j = 0; j < stream_status->plane_count; j++) {
                struct dc_plane_state *plane_state = stream_status->plane_states[j];

                /* force vsync flip when reconfiguring pipes to prevent underflow
                 * and corruption
                 */
                plane_state->flip_immediate = false;
            }
        }

        ret = dc_commit_state_no_check(dc, transition_context);
    }

    //always release as dc_commit_state_no_check retains in good case
    dc_release_state(transition_context);

    //restore previous pipe split policy
    dc->debug.pipe_split_policy = tmp_policy;

    if (ret != DC_OK) {
        //this should never happen
        BREAK_TO_DEBUGGER();
        return false;
    }

    //force full surface update
    for (i = 0; i < dc->current_state->stream_count; i++) {
        for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
            dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
        }
    }

    return true;
}
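commit_minimal_transition_state() is a save/override/restore wrapper: it forces MPC_SPLIT_AVOID only for the duration of one intermediate commit, then puts the debug policy back on every path. The shape of that pattern, shown standalone (the commit helper here is hypothetical; the real function also validates bandwidth and forces vsync flips):

extern bool commit_copy_of_state(struct dc *dc, struct dc_state *base); /* hypothetical */

static bool commit_with_forced_policy_sketch(struct dc *dc, struct dc_state *base)
{
    enum pipe_split_policy saved = dc->debug.pipe_split_policy;
    bool ok;

    dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; /* minimal pipe usage for the transition */
    ok = commit_copy_of_state(dc, base);           /* stands in for copy + validate + commit */
    dc->debug.pipe_split_policy = saved;           /* always restore */

    return ok;
}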
bool dc_update_planes_and_stream(struct dc *dc,
        struct dc_surface_update *srf_updates, int surface_count,
        struct dc_stream_state *stream,
        struct dc_stream_update *stream_update)
{
    struct dc_state *context;
    enum surface_update_type update_type;
    int i;

    /* In cases where MPO and split or ODM are used transitions can
     * cause underflow. Apply stream configuration with minimal pipe
     * split first to avoid unsupported transitions for active pipes.
     */
    bool force_minimal_pipe_splitting = false;
    bool is_plane_addition = false;

    struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);

    if (cur_stream_status &&
            dc->current_state->stream_count > 0 &&
            dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
        /* determine if minimal transition is required */
        if (cur_stream_status->plane_count > surface_count) {
            force_minimal_pipe_splitting = true;
        } else if (cur_stream_status->plane_count < surface_count) {
            force_minimal_pipe_splitting = true;
            is_plane_addition = true;
        }
    }

    /* on plane addition, minimal state is the current one */
    if (force_minimal_pipe_splitting && is_plane_addition &&
            !commit_minimal_transition_state(dc, dc->current_state))
        return false;

    if (!update_planes_and_stream_state(
            dc,
            srf_updates,
            surface_count,
            stream,
            stream_update,
            &update_type,
            &context))
        return false;

    /* on plane addition, minimal state is the new one */
    if (force_minimal_pipe_splitting && !is_plane_addition) {
        if (!commit_minimal_transition_state(dc, context)) {
            dc_release_state(context);
            return false;
        }

        update_type = UPDATE_TYPE_FULL;
    }

    commit_planes_for_stream(
            dc,
            srf_updates,
            surface_count,
            stream,
            stream_update,
            update_type,
            context);

    if (dc->current_state != context) {

        /* Since memory free requires elevated IRQL, an interrupt
         * request is generated by mem free. If this happens
         * between freeing and reassigning the context, our vsync
         * interrupt will call into dc and cause a memory
         * corruption BSOD. Hence, we first reassign the context,
         * then free the old context.
         */

        struct dc_state *old = dc->current_state;

        dc->current_state = context;
        dc_release_state(old);

        // clear any forced full updates
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
            struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

            if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
                pipe_ctx->plane_state->force_full_update = false;
        }
    }
    return true;
}
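The context swap at the end of dc_update_planes_and_stream() deliberately orders the pointer reassignment before the free so that a vsync interrupt arriving in between only ever sees a live state. The same idiom in miniature (a sketch; the refcounted release stands in for dc_release_state):

/* Publish the new state first, then drop the old reference. */
static void swap_then_release_sketch(struct dc *dc, struct dc_state *new_ctx)
{
    struct dc_state *old = dc->current_state;

    dc->current_state = new_ctx; /* interrupt handlers now see new_ctx */
    dc_release_state(old);       /* safe: nothing dereferences old anymore */
}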
void dc_commit_updates_for_stream(struct dc *dc,
        struct dc_surface_update *srf_updates,
        int surface_count,

@ -3779,10 +4256,18 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_
/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
    dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);

    if (dc->hwss.hardware_release)
        dc->hwss.hardware_release(dc);
}

void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
{
    if (dc->current_state)
        dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}

/*
 *****************************************************************************
 *  Function: dc_is_dmub_outbox_supported -

@ -3807,6 +4292,10 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
            !dc->debug.dpia_debug.bits.disable_dpia)
        return true;

    if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
            !dc->debug.dpia_debug.bits.disable_dpia)
        return true;

    /* dmub aux needs dmub notifications to be enabled */
    return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
@ -424,6 +424,8 @@ char *dc_status_to_str(enum dc_status status)
        return "No link encoder resource";
    case DC_FAIL_DP_PAYLOAD_ALLOCATION:
        return "Fail dp payload allocation";
    case DC_FAIL_DP_LINK_BANDWIDTH:
        return "Insufficient DP link bandwidth";
    case DC_ERROR_UNEXPECTED:
        return "Unexpected error";
    }
@ -23,8 +23,6 @@
 *
 */

#include <linux/delay.h>

#include "dm_services.h"
#include "core_types.h"
#include "timing_generator.h"
@ -1969,7 +1969,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
    enum dc_status status;
    bool skip_video_pattern;
    struct dc_link *link = stream->link;
    struct dc_link_settings link_settings = {0};
    const struct dc_link_settings *link_settings =
            &pipe_ctx->link_config.dp_link_settings;
    bool fec_enable;
    int i;
    bool apply_seamless_boot_optimization = false;

@ -1986,9 +1987,6 @@ static enum dc_status enable_link_dp(struct dc_state *state,
        }
    }

    /* get link settings for video mode timing */
    decide_link_settings(stream, &link_settings);

    /* Train with fallback when enabling DPIA link. Conventional links are
     * trained with fallback during sink detection.
     */

@ -1999,7 +1997,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
     * Temporary w/a to get DP2.0 link rates to work with SST.
     * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
     */
    if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING &&
    if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING &&
            pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
            link->dc->debug.set_mst_en_for_sst) {
        dp_enable_mst_on_sink(link, true);

@ -2012,11 +2010,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
        link->dc->hwss.edp_wait_for_hpd_ready(link, true);
    }

    if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
    if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
        /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
    } else {
        pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
                link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
                link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
        if (state->clk_mgr && !apply_seamless_boot_optimization)
            state->clk_mgr->funcs->update_clocks(state->clk_mgr,
                    state, false);

@ -2032,16 +2030,15 @@ static enum dc_status enable_link_dp(struct dc_state *state,

    skip_video_pattern = true;

    if (link_settings.link_rate == LINK_RATE_LOW)
    if (link_settings->link_rate == LINK_RATE_LOW)
        skip_video_pattern = false;

    if (perform_link_training_with_retries(&link_settings,
    if (perform_link_training_with_retries(link_settings,
            skip_video_pattern,
            LINK_TRAINING_ATTEMPTS,
            pipe_ctx,
            pipe_ctx->stream->signal,
            do_fallback)) {
        link->cur_link_settings = link_settings;
        status = DC_OK;
    } else {
        status = DC_FAIL_DP_LINK_TRAINING;

@ -2052,7 +2049,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
    else
        fec_enable = true;

    if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
    if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
        dp_set_fec_enable(link, fec_enable);

    // during mode set we do DP_SET_POWER off then on, aux writes are lost

@ -2734,6 +2731,22 @@ static void enable_link_lvds(struct pipe_ctx *pipe_ctx)

}
bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
{
    bool ret = false;
    union dpcd_alpm_configuration alpm_config;

    if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
        memset(&alpm_config, 0, sizeof(alpm_config));

        alpm_config.bits.ENABLE = (enable ? true : false);
        ret = dm_helpers_dp_write_dpcd(link->ctx, link,
                DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw,
                sizeof(alpm_config.raw));
    }
    return ret;
}

/****************************enable_link***********************************/
static enum dc_status enable_link(
        struct dc_state *state,
@ -3228,7 +3241,6 @@ bool dc_link_setup_psr(struct dc_link *link,
    unsigned int panel_inst;
    /* updateSinkPsrDpcdConfig*/
    union dpcd_psr_configuration psr_configuration;
    union dpcd_alpm_configuration alpm_configuration;
    union dpcd_sink_active_vtotal_control_mode vtotal_control = {0};

    psr_context->controllerId = CONTROLLER_ID_UNDEFINED;

@ -3284,15 +3296,7 @@ bool dc_link_setup_psr(struct dc_link *link,
        sizeof(psr_configuration.raw));

    if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
        memset(&alpm_configuration, 0, sizeof(alpm_configuration));

        alpm_configuration.bits.ENABLE = 1;
        dm_helpers_dp_write_dpcd(
            link->ctx,
            link,
            DP_RECEIVER_ALPM_CONFIG,
            &alpm_configuration.raw,
            sizeof(alpm_configuration.raw));
        dc_power_alpm_dpcd_enable(link, true);
        psr_context->su_granularity_required =
            psr_config->su_granularity_required;
        psr_context->su_y_granularity =

@ -3368,6 +3372,7 @@ bool dc_link_setup_psr(struct dc_link *link,
    switch(link->ctx->asic_id.chip_family) {
    case FAMILY_YELLOW_CARP:
    case AMDGPU_FAMILY_GC_10_3_6:
    case AMDGPU_FAMILY_GC_11_0_2:
        if(!dc->debug.disable_z10)
            psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
        break;
@ -4117,11 +4122,10 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
    struct fixed31_32 avg_time_slots_per_mtp;
    uint8_t req_slot_count = 0;
    uint8_t vc_id = 1; /// VC ID always 1 for SST
    struct dc_link_settings link_settings = {0};
    struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings;
    const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res);
    DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

    decide_link_settings(stream, &link_settings);
    stream->link->cur_link_settings = link_settings;

    if (link_hwss->ext.enable_dp_link_output)

@ -4595,10 +4599,7 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
    if (link_stream->dpms_off)
        return;

    decide_link_settings(link_stream, &store_settings);

    if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
        (store_settings.link_rate != LINK_RATE_UNKNOWN))
    if (decide_link_settings(link_stream, &store_settings))
        dp_retrain_link_dp_test(link, &store_settings, false);
}
@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"
#include "dm_helpers.h"
#include "gpio_service_interface.h"

@ -95,16 +93,13 @@ union hdmi_scdc_update_read_data {
};

union hdmi_scdc_status_flags_data {
    uint8_t byte[2];
    uint8_t byte;
    struct {
        uint8_t CLOCK_DETECTED:1;
        uint8_t CH0_LOCKED:1;
        uint8_t CH1_LOCKED:1;
        uint8_t CH2_LOCKED:1;
        uint8_t RESERVED:4;
        uint8_t RESERVED2:8;
        uint8_t RESERVED3:8;

    } fields;
};

@ -772,7 +767,7 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
            sizeof(scramble_status));
        offset = HDMI_SCDC_STATUS_FLAGS;
        dal_ddc_service_query_ddc_data(ddc_service, slave_address,
            &offset, sizeof(offset), status_data.byte,
            &offset, sizeof(offset), &status_data.byte,
            sizeof(status_data.byte));
    }
}
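The union change above matters because the DDC read is sized from the field: with `uint8_t byte[2]` the query read two bytes, but the SCDC status-flags register is a single byte, so the union shrinks to one `byte` and the call passes its address. A sketch of the corrected shape (field names simplified; the read call is a hypothetical stand-in):

#include <stdint.h>

/* One raw byte aliased with its bit decomposition, as in the fixed union. */
union scdc_status_sketch {
    uint8_t byte;
    struct {
        uint8_t clock_detected:1;
        uint8_t ch0_locked:1;
        uint8_t ch1_locked:1;
        uint8_t ch2_locked:1;
        uint8_t reserved:4;
    } fields;
};

extern void ddc_read_sketch(uint8_t offset, uint8_t *buf, unsigned int len); /* hypothetical */

static int scdc_clock_detected_sketch(void)
{
    union scdc_status_sketch s = { 0 };

    /* read length now follows the field's size automatically: 1 byte */
    ddc_read_sketch(0x40 /* illustrative offset */, &s.byte, sizeof(s.byte));
    return s.fields.clock_detected;
}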
@ -3880,15 +3880,13 @@ static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_
    return true;
}

void decide_link_settings(struct dc_stream_state *stream,
bool decide_link_settings(struct dc_stream_state *stream,
    struct dc_link_settings *link_setting)
{
    struct dc_link *link;
    uint32_t req_bw;
    struct dc_link *link = stream->link;
    uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);

    req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);

    link = stream->link;
    memset(link_setting, 0, sizeof(*link_setting));

    /* if preferred is specified through AMDDP, use it, if it's enough
     * to drive the mode

@ -3897,16 +3895,15 @@ bool decide_link_settings(struct dc_stream_state *stream,
            LANE_COUNT_UNKNOWN &&
            link->preferred_link_setting.link_rate !=
                    LINK_RATE_UNKNOWN) {
        *link_setting = link->preferred_link_setting;
        return;
        *link_setting = link->preferred_link_setting;
        return true;
    }

    /* MST doesn't perform link training for now
     * TODO: add MST specific link training routine
     */
    if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
        if (decide_mst_link_settings(link, link_setting))
            return;
        decide_mst_link_settings(link, link_setting);
    } else if (link->connector_signal == SIGNAL_TYPE_EDP) {
        /* enable edp link optimization for DSC eDP case */
        if (stream->timing.flags.DSC) {

@ -3924,17 +3921,16 @@ bool decide_link_settings(struct dc_stream_state *stream,
                decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
                max_link_rate = tmp_link_setting.link_rate;
            }
            if (decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate))
                return;
        } else if (decide_edp_link_settings(link, link_setting, req_bw))
            return;
    } else if (decide_dp_link_settings(link, link_setting, req_bw))
        return;
            decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
        } else {
            decide_edp_link_settings(link, link_setting, req_bw);
        }
    } else {
        decide_dp_link_settings(link, link_setting, req_bw);
    }

    BREAK_TO_DEBUGGER();
    ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);

    *link_setting = link->verified_link_cap;
    return link_setting->lane_count != LANE_COUNT_UNKNOWN &&
            link_setting->link_rate != LINK_RATE_UNKNOWN;
}

/*************************Short Pulse IRQ***************************/
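With decide_link_settings() now returning bool, callers can collapse the old two-step "decide, then test lane count and rate" pattern into a single test, as the retrain site earlier in this series already does. A hedged caller-side sketch:

/* Caller-side shape after the signature change (sketch only). */
static void retrain_if_possible_sketch(struct dc_link *link,
        struct dc_stream_state *stream)
{
    struct dc_link_settings settings = {0};

    /* true only when both lane count and link rate were resolved */
    if (decide_link_settings(stream, &settings))
        dp_retrain_link_dp_test(link, &settings, false);
}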
@ -4509,7 +4505,6 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
{
    int i;
    struct pipe_ctx *pipe_ctx;
    struct dc_link_settings prev_link_settings = link->preferred_link_setting;

    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];

@ -4520,10 +4515,6 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
    if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
        return;

    /* toggle stream state with the preference for current link settings */
    dc_link_set_preferred_training_settings((struct dc *)link->dc,
            &link->cur_link_settings, NULL, link, true);

    for (i = 0; i < MAX_PIPES; i++) {
        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
        if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&

@ -4539,10 +4530,6 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
            core_link_enable_stream(link->dc->current_state, pipe_ctx);
        }
    }

    /* restore previous link settings preference */
    dc_link_set_preferred_training_settings((struct dc *)link->dc,
            &prev_link_settings, NULL, link, true);
}
bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,

@ -4890,7 +4877,7 @@ static void get_active_converter_info(
            hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (link->dc->caps.hdmi_frl_pcon_support) {
        if (link->dc->caps.dp_hdmi21_pcon_support) {
            union hdmi_encoded_link_bw hdmi_encoded_link_bw;

            link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =

@ -5553,7 +5540,7 @@ static bool retrieve_link_cap(struct dc_link *link)
     * only if required.
     */
    if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
            !link->dc->debug.dpia_debug.bits.disable_force_tbt3_work_around &&
            link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around &&
            link->dpcd_caps.is_branch_dev &&
            link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
            link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 &&
@ -6971,13 +6958,14 @@ bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
            dc_is_dp_signal(pipe_ctx->stream->signal));
}

void edp_panel_backlight_power_on(struct dc_link *link)
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
{
    if (link->connector_signal != SIGNAL_TYPE_EDP)
        return;

    link->dc->hwss.edp_power_control(link, true);
    link->dc->hwss.edp_wait_for_hpd_ready(link, true);
    if (wait_for_hpd)
        link->dc->hwss.edp_wait_for_hpd_ready(link, true);
    if (link->dc->hwss.edp_backlight_control)
        link->dc->hwss.edp_backlight_control(link, true);
}
@ -85,6 +85,13 @@ bool dc_link_dpia_query_hpd_status(struct dc_link *link)
    if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
        is_hpd_high = cmd.query_hpd.data.result;

    DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
        __func__,
        link->link_index,
        link->link_id.enum_id - ENUM_ID_1,
        cmd.query_hpd.data.status,
        cmd.query_hpd.data.result);

    return is_hpd_high;
}
@ -195,30 +202,34 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
}

/* Convert DC training pattern to DPIA training stage. */
static enum dpia_set_config_ts convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern tps)
static enum dc_status convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern tps, enum dpia_set_config_ts *ts)
{
    enum dpia_set_config_ts ts;
    enum dc_status status = DC_OK;

    switch (tps) {
    case DP_TRAINING_PATTERN_SEQUENCE_1:
        ts = DPIA_TS_TPS1;
        *ts = DPIA_TS_TPS1;
        break;
    case DP_TRAINING_PATTERN_SEQUENCE_2:
        ts = DPIA_TS_TPS2;
        *ts = DPIA_TS_TPS2;
        break;
    case DP_TRAINING_PATTERN_SEQUENCE_3:
        ts = DPIA_TS_TPS3;
        *ts = DPIA_TS_TPS3;
        break;
    case DP_TRAINING_PATTERN_SEQUENCE_4:
        ts = DPIA_TS_TPS4;
        *ts = DPIA_TS_TPS4;
        break;
    default:
        ts = DPIA_TS_DPRX_DONE;
        ASSERT(false); /* TPS not supported by helper function. */
    case DP_TRAINING_PATTERN_VIDEOIDLE:
        *ts = DPIA_TS_DPRX_DONE;
        break;
    default: /* TPS not supported by helper function. */
        ASSERT(false);
        *ts = DPIA_TS_DPRX_DONE;
        status = DC_UNSUPPORTED_VALUE;
        break;
    }

    return ts;
    return status;
}
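The rework above separates "which training stage" (out parameter) from "did the mapping succeed" (return status), so callers can abort training on an unsupported pattern instead of silently getting DPIA_TS_DPRX_DONE. The same out-parameter shape in a self-contained sketch with illustrative names:

enum status_sketch { OK_SKETCH, UNSUPPORTED_SKETCH };
enum stage_sketch { TS_TPS1, TS_TPS2, TS_TPS3, TS_TPS4, TS_DPRX_DONE };

/* Status out via return, payload out via pointer; mirrors the pattern above. */
static enum status_sketch convert_sketch(int tps, enum stage_sketch *ts)
{
    switch (tps) {
    case 1: *ts = TS_TPS1; return OK_SKETCH;
    case 2: *ts = TS_TPS2; return OK_SKETCH;
    case 3: *ts = TS_TPS3; return OK_SKETCH;
    case 4: *ts = TS_TPS4; return OK_SKETCH;
    default:
        *ts = TS_DPRX_DONE;        /* still leave a defined value behind */
        return UNSUPPORTED_SKETCH; /* caller aborts instead of guessing */
    }
}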
/* Write training pattern to DPCD. */

@ -329,10 +340,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
            /* DPOA-to-x */
            /* Instruct DPOA to transmit TPS1 then update DPCD. */
            if (retry_count == 0) {
                ts = convert_trng_ptn_to_trng_stg(lt_settings->pattern_for_cr);
                status = core_link_send_set_config(link,
                        DPIA_SET_CFG_SET_TRAINING,
                        ts);
                status = convert_trng_ptn_to_trng_stg(lt_settings->pattern_for_cr, &ts);
                if (status != DC_OK) {
                    result = LINK_TRAINING_ABORT;
                    break;

@ -414,13 +422,14 @@ static enum link_training_result dpia_training_cr_non_transparent(
    if (link->is_hpd_pending)
        result = LINK_TRAINING_ABORT;

    DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
        " -hop(%d)\n - result(%d)\n - retries(%d)\n",
    DC_LOG_HW_LINK_TRAINING(
        "%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n - status(%d)\n",
        __func__,
        link->link_id.enum_id - ENUM_ID_1,
        hop,
        result,
        retry_count);
        retry_count,
        status);

    return result;
}
@ -624,7 +633,11 @@ static enum link_training_result dpia_training_eq_non_transparent(

            /* Instruct DPOA to transmit TPSn then update DPCD. */
            if (retries_eq == 0) {
                ts = convert_trng_ptn_to_trng_stg(tr_pattern);
                status = convert_trng_ptn_to_trng_stg(tr_pattern, &ts);
                if (status != DC_OK) {
                    result = LINK_TRAINING_ABORT;
                    break;
                }
                status = core_link_send_set_config(link,
                        DPIA_SET_CFG_SET_TRAINING,
                        ts);

@ -705,13 +718,14 @@ static enum link_training_result dpia_training_eq_non_transparent(
    if (link->is_hpd_pending)
        result = LINK_TRAINING_ABORT;

    DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
        " - hop(%d)\n - result(%d)\n - retries(%d)\n",
    DC_LOG_HW_LINK_TRAINING(
        "%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n - status(%d)\n",
        __func__,
        link->link_id.enum_id - ENUM_ID_1,
        hop,
        result,
        retries_eq);
        retries_eq,
        status);

    return result;
}
@ -946,7 +960,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
    bool skip_video_pattern)
{
    enum link_training_result result;
    struct link_training_settings lt_settings;
    struct link_training_settings lt_settings = {0};
    uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
    int8_t repeater_id; /* Current hop. */
@ -292,6 +292,7 @@ void link_enc_cfg_link_encs_assign(
    int j;

    ASSERT(state->stream_count == stream_count);
    ASSERT(dc->current_state->res_ctx.link_enc_cfg_ctx.mode == LINK_ENC_CFG_STEADY);

    /* Release DIG link encoder resources before running assignment algorithm. */
    for (i = 0; i < dc->current_state->stream_count; i++)

@ -561,6 +562,31 @@ struct link_encoder *link_enc_cfg_get_link_enc(
    return link_enc;
}

struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream_current(
        struct dc *dc,
        const struct dc_stream_state *stream)
{
    struct link_encoder *link_enc = NULL;
    struct display_endpoint_id ep_id;
    int i;

    ep_id = (struct display_endpoint_id) {
        .link_id = stream->link->link_id,
        .ep_type = stream->link->ep_type};

    for (i = 0; i < MAX_PIPES; i++) {
        struct link_enc_assignment assignment =
            dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];

        if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) {
            link_enc = stream->link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA];
            break;
        }
    }

    return link_enc;
}
bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link)
{
    bool is_avail = true;

@ -595,6 +621,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
    uint8_t dig_stream_count = 0;
    int matching_stream_ptrs = 0;
    int eng_ids_per_ep_id[MAX_PIPES] = {0};
    int ep_ids_per_eng_id[MAX_PIPES] = {0};
    int valid_bitmap = 0;

    /* (1) No. valid entries same as stream count. */

@ -630,6 +657,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
        struct display_endpoint_id ep_id_i = assignment_i.ep_id;

        eng_ids_per_ep_id[i]++;
        ep_ids_per_eng_id[i]++;
        for (j = 0; j < MAX_PIPES; j++) {
            struct link_enc_assignment assignment_j =
                state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j];

@ -644,6 +672,10 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
                    assignment_i.eng_id != assignment_j.eng_id) {
                valid_uniqueness = false;
                eng_ids_per_ep_id[i]++;
            } else if (!are_ep_ids_equal(&ep_id_i, &ep_id_j) &&
                    assignment_i.eng_id == assignment_j.eng_id) {
                valid_uniqueness = false;
                ep_ids_per_eng_id[i]++;
            }
        }
    }
@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"

#include "resource.h"

@ -65,6 +63,7 @@
#include "dcn302/dcn302_resource.h"
#include "dcn303/dcn303_resource.h"
#include "dcn31/dcn31_resource.h"
#include "dcn314/dcn314_resource.h"
#include "dcn315/dcn315_resource.h"
#include "dcn316/dcn316_resource.h"
#include "../dcn32/dcn32_resource.h"

@ -169,6 +168,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
        if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
            dc_version = DCN_VERSION_3_21;
        break;
    case AMDGPU_FAMILY_GC_11_0_2:
        if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
            dc_version = DCN_VERSION_3_14;
        break;
    default:
        dc_version = DCE_VERSION_UNKNOWN;
        break;
@ -258,6 +261,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
    case DCN_VERSION_3_1:
        res_pool = dcn31_create_resource_pool(init_data, dc);
        break;
    case DCN_VERSION_3_14:
        res_pool = dcn314_create_resource_pool(init_data, dc);
        break;
    case DCN_VERSION_3_15:
        res_pool = dcn315_create_resource_pool(init_data, dc);
        break;

@ -759,6 +765,10 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
            (*split_idx)++;
            split_pipe = split_pipe->top_pipe;
        }

        /* MPO window on right side of ODM split */
        if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe)
            (*split_idx)++;
    } else {
        /*Get odm split index*/
        struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
@ -805,7 +815,11 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
    /*
     * Only the leftmost ODM pipe should be offset by a nonzero distance
     */
    if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
    if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) {
        /* MPO window on right side of ODM split */
        data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) *
                stream->dst.width / stream->src.width;
    } else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
        data->recout.x = stream->dst.x;
        if (stream->src.x < surf_clip.x)
            data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width

@ -1003,6 +1017,8 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
            * stream->dst.height / stream->src.height;
    if (pipe_ctx->prev_odm_pipe && split_idx)
        ro_lb = data->h_active * split_idx - recout_full_x;
    else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe)
        ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x;
    else
        ro_lb = data->recout.x - recout_full_x;
    ro_tb = data->recout.y - recout_full_y;
@ -1108,9 +1124,26 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
            timing->h_border_left + timing->h_border_right;
    pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
        timing->v_border_top + timing->v_border_bottom;
    if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
    if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) {
        pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;

        DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d prev_odm_pipe:%d\n",
                __func__,
                pipe_ctx->pipe_idx,
                pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1,
                pipe_ctx->prev_odm_pipe ? pipe_ctx->prev_odm_pipe->pipe_idx : -1);
    } /* ODM + windows MPO, where window is on either right or left ODM half */
    else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) {

        pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1;

        DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d top_pipe->next_odm_pipe:%d top_pipe->prev_odm_pipe:%d\n",
                __func__,
                pipe_ctx->pipe_idx,
                pipe_ctx->top_pipe->pipe_idx,
                pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1,
                pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1);
    }
    /* depends on h_active */
    calculate_recout(pipe_ctx);
    /* depends on pixel format */

@ -1118,10 +1151,12 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
    /* depends on scaling ratios and recout, does not calculate offset yet */
    calculate_viewport_size(pipe_ctx);

    /* Stopgap for validation of ODM + MPO on one side of screen case */
    if (pipe_ctx->plane_res.scl_data.viewport.height < 1 ||
            pipe_ctx->plane_res.scl_data.viewport.width < 1)
        return false;
    if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) {
        /* Stopgap for validation of ODM + MPO on one side of screen case */
        if (pipe_ctx->plane_res.scl_data.viewport.height < 1 ||
                pipe_ctx->plane_res.scl_data.viewport.width < 1)
            return false;
    }

    /*
     * LB calculations depend on vp size, h/v_active and scaling ratios

@ -1129,12 +1164,13 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
     * on certain displays, such as the Sharp 4k. 36bpp is needed
     * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and
     * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc
     * precision on at least DCN display engines. However, at least
     * Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
     * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
     * did not show such problems, so this seems to be the exception.
     * precision on DCN display engines, but apparently not for DCE, as
     * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have
     * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
     * neither do DCE-8 at 4k resolution, or DCE-11.2 (broken identify pixel
     * passthrough). Therefore only use 36 bpp on DCN where it is actually needed.
     */
    if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
    if (plane_state->ctx->dce_version > DCE_VERSION_MAX)
        pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
    else
        pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
@ -1370,8 +1406,12 @@ static struct pipe_ctx *acquire_free_pipe_for_head(
     * to acquire an idle one to satisfy the request
     */

    if (!pool->funcs->acquire_idle_pipe_for_layer)
        return NULL;
    if (!pool->funcs->acquire_idle_pipe_for_layer) {
        if (!pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer)
            return NULL;
        else
            return pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer(context, pool, head_pipe->stream, head_pipe);
    }

    return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
}
@ -1421,7 +1461,10 @@ bool dc_add_plane_to_context(
|
||||
struct resource_pool *pool = dc->res_pool;
|
||||
struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe;
|
||||
struct dc_stream_status *stream_status = NULL;
|
||||
struct pipe_ctx *prev_right_head = NULL;
|
||||
struct pipe_ctx *free_right_pipe = NULL;
|
||||
|
||||
DC_LOGGER_INIT(stream->ctx->logger);
|
||||
for (i = 0; i < context->stream_count; i++)
|
||||
if (context->streams[i] == stream) {
|
||||
stream_status = &context->stream_status[i];
|
||||
@ -1468,23 +1511,88 @@ bool dc_add_plane_to_context(
|
||||
if (head_pipe != free_pipe) {
|
||||
tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
|
||||
ASSERT(tail_pipe);
|
||||
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
|
||||
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
|
||||
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
|
||||
free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
|
||||
free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
|
||||
free_pipe->clock_source = tail_pipe->clock_source;
|
||||
free_pipe->top_pipe = tail_pipe;
|
||||
tail_pipe->bottom_pipe = free_pipe;
|
||||
if (!free_pipe->next_odm_pipe && tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
|
||||
free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
|
||||
tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;
|
||||
}
|
||||
if (!free_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) {
|
||||
free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
|
||||
tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
|
||||
|
||||
/* ODM + window MPO, where MPO window is on right half only */
|
||||
if (free_pipe->plane_state &&
|
||||
(free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) &&
|
||||
tail_pipe->next_odm_pipe) {
|
||||
|
||||
DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d tail_pipe->next_odm_pipe:%d\n",
|
||||
__func__,
|
||||
free_pipe->pipe_idx,
|
||||
tail_pipe->next_odm_pipe ? tail_pipe->next_odm_pipe->pipe_idx : -1);

                /*
                 * We want to avoid the case where the right side already has a pipe assigned to
                 * it and is different from free_pipe (which would trigger a pipe reallocation).
                 * Check the old context to see if the right side already has a pipe allocated
                 * - If not, continue to use free_pipe
                 * - If the right side already has a pipe, use that pipe instead if it's available
                 */
                prev_right_head = &dc->current_state->res_ctx.pipe_ctx[tail_pipe->next_odm_pipe->pipe_idx];
                if ((prev_right_head->bottom_pipe) && (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) {
                    free_right_pipe = acquire_free_pipe_for_head(context, pool, tail_pipe->next_odm_pipe);
                    if (free_right_pipe) {
                        free_pipe->stream = NULL;
                        memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource));
                        memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource));
                        free_pipe->plane_state = NULL;
                        free_pipe->pipe_idx = 0;
                        free_right_pipe->plane_state = plane_state;
                        free_pipe = free_right_pipe;
                    }
                }

                free_pipe->stream_res.tg = tail_pipe->next_odm_pipe->stream_res.tg;
                free_pipe->stream_res.abm = tail_pipe->next_odm_pipe->stream_res.abm;
                free_pipe->stream_res.opp = tail_pipe->next_odm_pipe->stream_res.opp;
                free_pipe->stream_res.stream_enc = tail_pipe->next_odm_pipe->stream_res.stream_enc;
                free_pipe->stream_res.audio = tail_pipe->next_odm_pipe->stream_res.audio;
                free_pipe->clock_source = tail_pipe->next_odm_pipe->clock_source;

                free_pipe->top_pipe = tail_pipe->next_odm_pipe;
                tail_pipe->next_odm_pipe->bottom_pipe = free_pipe;
            } else {
                free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
                free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
                free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
                free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
                free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
                free_pipe->clock_source = tail_pipe->clock_source;

                free_pipe->top_pipe = tail_pipe;
                tail_pipe->bottom_pipe = free_pipe;

                if (!free_pipe->next_odm_pipe && tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
                    free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
                    tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;
                }
                if (!free_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) {
                    free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
                    tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
                }
            }
        }

        /* ODM + window MPO, where MPO window is on left half only */
        if (free_pipe->plane_state &&
                (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
                free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
            DC_LOG_SCALER("%s - ODM + window MPO(left). free_pipe:%d\n",
                    __func__,
                    free_pipe->pipe_idx);
            break;
        }
        /* ODM + window MPO, where MPO window is on right half only */
        if (free_pipe->plane_state &&
                (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
            DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d\n",
                    __func__,
                    free_pipe->pipe_idx);
            break;
        }

        head_pipe = head_pipe->next_odm_pipe;
    }
    /* assign new surfaces*/
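
For reference, the left-half/right-half window tests in the loop above reduce to plain clip-rect comparisons against the midpoint of the stream source rectangle. A minimal standalone sketch of the same predicates (the helper names are illustrative, not part of this patch):

    /* Illustrative only: mirrors the clip_rect checks in
     * dc_add_plane_to_context above. Not driver API.
     */
    static bool mpo_window_is_right_half(const struct dc_plane_state *ps,
                                         const struct dc_stream_state *stream)
    {
        int mid = stream->src.x + stream->src.width / 2;

        return ps->clip_rect.x >= mid;
    }

    static bool mpo_window_is_left_half(const struct dc_plane_state *ps,
                                        const struct dc_stream_state *stream)
    {
        int mid = stream->src.x + stream->src.width / 2;

        return ps->clip_rect.x + ps->clip_rect.width <= mid;
    }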
@ -1702,6 +1810,9 @@ bool dc_is_stream_unchanged(
    if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
        return false;

    if (old_stream->odm_2to1_policy_applied != stream->odm_2to1_policy_applied)
        return false;

    return true;
}

@ -1882,6 +1993,12 @@ static int acquire_first_free_pipe(
            pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
            pipe_ctx->pipe_idx = i;

            if (i >= pool->timing_generator_count) {
                int tg_inst = pool->timing_generator_count - 1;

                pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
                pipe_ctx->stream_res.opp = pool->opps[tg_inst];
            }

            pipe_ctx->stream = stream;
            return i;
@ -1994,9 +2111,6 @@ enum dc_status dc_remove_stream_from_ctx(
                dc->res_pool,
                del_pipe->stream_res.stream_enc,
                false);
    /* Release link encoder from stream in new dc_state. */
    if (dc->res_pool->funcs->link_enc_unassign)
        dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);

    if (is_dp_128b_132b_signal(del_pipe)) {
        update_hpo_dp_stream_engine_usage(
@ -2297,12 +2411,10 @@ enum dc_status resource_map_pool_resources(
    /* Allocate DP HPO Stream Encoder based on signal, hw capabilities
     * and link settings
     */
    if (dc_is_dp_signal(stream->signal) &&
            dc->caps.dp_hpo) {
        struct dc_link_settings link_settings = {0};

        decide_link_settings(stream, &link_settings);
        if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
    if (dc_is_dp_signal(stream->signal)) {
        if (!decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
            return DC_FAIL_DP_LINK_BANDWIDTH;
        if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
            pipe_ctx->stream_res.hpo_dp_stream_enc =
                find_first_free_match_hpo_dp_stream_enc_for_link(
                    &context->res_ctx, pool, stream);
@ -2510,6 +2622,8 @@ static void set_avi_info_frame(
    union hdmi_info_packet hdmi_info;
    union display_content_support support = {0};
    unsigned int vic = pipe_ctx->stream->timing.vic;
    unsigned int rid = pipe_ctx->stream->timing.rid;
    unsigned int fr_ind = pipe_ctx->stream->timing.fr_index;
    enum dc_timing_3d_format format;

    memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
@ -2702,6 +2816,15 @@ static void set_avi_info_frame(
        hdmi_info.bits.header.length = 14;
    }

    if (rid != 0 && fr_ind != 0) {
        hdmi_info.bits.header.version = 5;
        hdmi_info.bits.header.length = 15;

        hdmi_info.bits.FR0_FR3 = fr_ind & 0xF;
        hdmi_info.bits.FR4 = (fr_ind >> 4) & 0x1;
        hdmi_info.bits.RID0_RID5 = rid;
    }

    /* pixel repetition
     * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
     * repetition start from 1 */
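
The version-5 AVI infoframe packing above splits a 5-bit frame-rate index across the FR0_FR3 and FR4 fields. A worked example with an illustrative fr_ind of 21:

    /* fr_ind  = 21 = 0b10101 (illustrative value)
     * FR0_FR3 = 21 & 0xF        = 0b0101 (low four bits)
     * FR4     = (21 >> 4) & 0x1 = 0b1    (fifth bit)
     * RID0_RID5 carries the resolution identifier unchanged.
     */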
@ -3018,12 +3141,11 @@ bool pipe_need_reprogram(
    if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
        bool need_reprogram = false;
        struct dc *dc = pipe_ctx_old->stream->ctx->dc;
        enum link_enc_cfg_mode mode = dc->current_state->res_ctx.link_enc_cfg_ctx.mode;
        struct link_encoder *link_enc_prev =
            link_enc_cfg_get_link_enc_used_by_stream_current(dc, pipe_ctx_old->stream);

        dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
        if (link_enc_cfg_get_link_enc_used_by_stream(dc, pipe_ctx_old->stream) != pipe_ctx->stream->link_enc)
        if (link_enc_prev != pipe_ctx->stream->link_enc)
            need_reprogram = true;
        dc->current_state->res_ctx.link_enc_cfg_ctx.mode = mode;

        return need_reprogram;
    }

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"

@ -23,9 +23,6 @@
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dm_services.h"
#include "basics/dc_common.h"
#include "dc.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/mm.h>

/* DC interface (public) */
#include "dm_services.h"
#include "dc.h"

@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;

#define DC_VER "3.2.191"
#define DC_VER "3.2.194"

#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -163,7 +163,8 @@ struct dc_color_caps {
};

struct dc_dmub_caps {
    bool psr;
    bool psr;
    bool mclk_sw;
};

struct dc_caps {
@ -202,12 +203,11 @@ struct dc_caps {
    struct dc_color_caps color;
    struct dc_dmub_caps dmub_caps;
    bool dp_hpo;
    bool hdmi_frl_pcon_support;
    bool dp_hdmi21_pcon_support;
    bool edp_dsc_support;
    bool vbios_lttpr_aware;
    bool vbios_lttpr_enable;
    uint32_t max_otg_num;
#ifdef CONFIG_DRM_AMD_DC_DCN
    uint32_t max_cab_allocation_bytes;
    uint32_t cache_line_size;
    uint32_t cache_num_ways;
@ -215,7 +215,6 @@ struct dc_caps {
    uint16_t subvp_prefetch_end_to_mall_start_us;
    uint16_t subvp_pstate_allow_width_us;
    uint16_t subvp_vertical_int_margin_us;
#endif
    bool seamless_odm;
};

@ -361,6 +360,8 @@ enum visual_confirm {
    VISUAL_CONFIRM_HDR = 2,
    VISUAL_CONFIRM_MPCTREE = 4,
    VISUAL_CONFIRM_PSR = 5,
    VISUAL_CONFIRM_SWAPCHAIN = 6,
    VISUAL_CONFIRM_FAMS = 7,
    VISUAL_CONFIRM_SWIZZLE = 9,
};

@ -442,6 +443,8 @@ struct dc_clocks {
    bool prev_p_state_change_support;
    bool fclk_prev_p_state_change_support;
    int num_ways;
    bool fw_based_mclk_switching;
    bool fw_based_mclk_switching_shut_down;
    int prev_num_ways;
    enum dtm_pstate dtm_level;
    int max_supported_dppclk_khz;
@ -539,9 +542,8 @@ union dpia_debug_options {
        uint32_t force_non_lttpr:1; /* bit 1 */
        uint32_t extend_aux_rd_interval:1; /* bit 2 */
        uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
        uint32_t hpd_delay_in_ms:12; /* bits 4-15 */
        uint32_t disable_force_tbt3_work_around:1; /* bit 16 */
        uint32_t reserved:15;
        uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
        uint32_t reserved:27;
    } bits;
    uint32_t raw;
};
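
Because the union overlays the bitfield with a raw 32-bit word, DPIA debug flags can be set per-field and consumed as one packed value. A hedged sketch using only fields visible in this hunk:

    union dpia_debug_options opts = { .raw = 0 };

    opts.bits.extend_aux_rd_interval = 1;        /* bit 2 */
    opts.bits.enable_force_tbt3_work_around = 1; /* bit 4 in the new layout */
    /* opts.raw now carries both flags as a single packed word */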
@ -727,6 +729,7 @@ struct dc_debug_options {

    /* Enable dmub aux for legacy ddc */
    bool enable_dmub_aux_for_legacy_ddc;
    bool disable_fams;
    bool optimize_edp_link_rate; /* eDP ILR */
    /* FEC/PSR1 sequence enable delay in 100us */
    uint8_t fec_enable_delay_in100us;
@ -738,18 +741,21 @@ struct dc_debug_options {
    bool enable_sw_cntl_psr;
    union dpia_debug_options dpia_debug;
    bool disable_fixed_vs_aux_timeout_wa;
    uint32_t fixed_vs_aux_delay_config_wa;
    bool force_disable_subvp;
    bool force_subvp_mclk_switch;
    bool force_usr_allow;
    /* uses value at boot and disables switch */
    bool disable_dtb_ref_clk_switch;
    uint32_t fixed_vs_aux_delay_config_wa;
    bool extended_blank_optimization;
    union aux_wake_wa_options aux_wake_wa;
    uint32_t mst_start_top_delay;
    uint8_t psr_power_use_phy_fsm;
    enum dml_hostvm_override_opts dml_hostvm_override;
    bool use_legacy_soc_bb_mechanism;
    bool exit_idle_opt_for_cursor_updates;
    bool enable_single_display_2to1_odm_policy;
    bool enable_dp_dig_pixel_rate_div_policy;
};

struct gpu_info_soc_bounding_box_v1_0;
@ -802,6 +808,9 @@ struct dc {

    const char *build_id;
    struct vm_helper *vm_helper;

    uint32_t *dcn_reg_offsets;
    uint32_t *nbio_reg_offsets;
};

enum frame_buffer_mode {
@ -841,6 +850,15 @@ struct dc_init_data {

    struct dpcd_vendor_signature vendor_signature;
    bool force_smu_not_present;
    /*
     * IP offset for run time initialization of register addresses
     *
     * DCN3.5+ will fail dc_create() if these fields are null for them. They are
     * applicable starting with DCN32/321 and are not used for ASICs upstreamed
     * before them.
     */
    uint32_t *dcn_reg_offsets;
    uint32_t *nbio_reg_offsets;
};

struct dc_callback_init {
@ -1059,6 +1077,8 @@ struct dc_plane_state {
    /* HACK: Workaround for forcing full reprogramming under some conditions */
    bool force_full_update;

    bool is_phantom; // TODO: Change mall_stream_config into mall_plane_config instead

    /* private to dc_surface.c */
    enum dc_irq_source irq_source;
    struct kref refcount;
@ -1451,6 +1471,9 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable);
/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc);

/* disables fw based mclk switch */
void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc);

bool dc_set_psr_allow_active(struct dc *dc, bool enable);
void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);

@ -252,6 +252,93 @@ void dc_dmub_trace_event_control(struct dc *dc, bool enable)
    dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
    union dmub_rb_cmd cmd = { 0 };

    cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
    cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
    cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
    cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
    cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

    cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

    // Send the command to the DMCUB.
    dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
    dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
    dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
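
A hypothetical call site for the new helper (not part of this hunk): once a supported VTOTAL range is known, the driver hands it to the firmware in one command. The tg_inst and drr_info values below are assumptions standing in for the SubVP code added later in this patch:

    /* Illustrative only: clamp the timing generator's VTOTAL between
     * the two bounds computed by the SubVP + DRR microschedule.
     */
    dc_dmub_srv_drr_update_cmd(dc, tg_inst,
            drr_info.min_vtotal_supported,
            drr_info.max_vtotal_supported);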

static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
    uint8_t pipes = 0;
    int i = 0;

    for (i = 0; i < MAX_PIPES; i++) {
        struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

        if (pipe->stream == stream && pipe->stream_res.tg)
            pipes = i;
    }
    return pipes;
}

static int dc_dmub_srv_get_timing_generator_offset(struct dc *dc, struct dc_stream_state *stream)
{
    int tg_inst = 0;
    int i = 0;

    for (i = 0; i < MAX_PIPES; i++) {
        struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

        if (pipe->stream == stream && pipe->stream_res.tg) {
            tg_inst = pipe->stream_res.tg->inst;
            break;
        }
    }
    return tg_inst;
}

bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
    union dmub_rb_cmd cmd = { 0 };
    struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
    int i = 0;
    int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
    uint8_t visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;

    if (dc == NULL)
        return false;

    // Format command.
    cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
    cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
    cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
    cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;

    for (i = 0; context && i < context->stream_count; i++) {
        struct dc_stream_state *stream = context->streams[i];
        uint8_t min_refresh_in_hz = (stream->timing.min_refresh_in_uhz + 999999) / 1000000;
        int tg_inst = dc_dmub_srv_get_timing_generator_offset(dc, stream);

        config_data->pipe_data[tg_inst].pix_clk_100hz = stream->timing.pix_clk_100hz;
        config_data->pipe_data[tg_inst].min_refresh_in_hz = min_refresh_in_hz;
        config_data->pipe_data[tg_inst].max_ramp_step = ramp_up_num_steps;
        config_data->pipe_data[tg_inst].pipes = dc_dmub_srv_get_pipes_for_stream(dc, stream);
    }

    cmd.fw_assisted_mclk_switch.header.payload_bytes =
        sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

    // Send the command to the DMCUB.
    dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
    dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
    dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);

    return true;
}
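
Note the micro-Hz to Hz conversion in the loop above rounds up, so the firmware never assumes a refresh floor below what the panel actually supports. A worked example with an illustrative 47.952 Hz minimum:

    /* min_refresh_in_uhz = 47952000
     * (47952000 + 999999) / 1000000 = 48, not 47
     */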

void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
{
    union dmub_rb_cmd cmd = { 0 };
@ -283,6 +370,338 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
    }
}

#ifdef CONFIG_DRM_AMD_DC_DCN
/**
 * ***********************************************************************************************
 * populate_subvp_cmd_drr_info: Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information required for calculating
 * the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 *
 * @param [in] dc: current dc state
 * @param [in] subvp_pipe: pipe_ctx for the SubVP pipe
 * @param [in] vblank_pipe: pipe_ctx for the DRR pipe
 * @param [in] pipe_data: Pipe data which stores the VBLANK/DRR info
 *
 * @return: void
 *
 * ***********************************************************************************************
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
        struct pipe_ctx *subvp_pipe,
        struct pipe_ctx *vblank_pipe,
        struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
    struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
    struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
    struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
    int16_t drr_frame_us = 0;
    int16_t min_drr_supported_us = 0;
    int16_t max_drr_supported_us = 0;
    int16_t max_drr_vblank_us = 0;
    int16_t max_drr_mallregion_us = 0;
    int16_t mall_region_us = 0;
    int16_t prefetch_us = 0;
    int16_t subvp_active_us = 0;
    int16_t drr_active_us = 0;
    int16_t min_vtotal_supported = 0;
    int16_t max_vtotal_supported = 0;

    pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
    pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
    pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

    drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total,
            (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
    // P-State allow width and FW delays already included phantom_timing->v_addressable
    mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total,
            (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000);
    min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
    min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 *
            (div64_s64((int64_t)min_drr_supported_us, 1000000)),
            (int64_t)drr_timing->h_total);

    prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total,
            (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
            dc->caps.subvp_prefetch_end_to_mall_start_us);
    subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total,
            (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000);
    drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total,
            (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
    max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
    max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
    max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
    max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)),
            (int64_t)drr_timing->h_total);

    pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
    pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
}
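
To make the microschedule bounds concrete, a worked example with illustrative numbers (not taken from the patch): a DRR panel with v_total = 1125, h_total = 2200 and a 148.5 MHz pixel clock has a frame time of about 16667 us. With mall_region_us = 400 and the 500 us SUBVP_DRR_MARGIN_US:

    /* min_drr_supported_us ~= 16667 + 400 + 500 = 17567 us
     * min_vtotal_supported ~= 17567e-6 * 148500000 / 2200 ~= 1186 lines
     */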

/**
 * ***********************************************************************************************
 * populate_subvp_cmd_vblank_pipe_info: Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information required to calculate
 * the microschedule for SubVP + VBLANK case is stored in the pipe_data (subvp_data and vblank_data).
 * Also check if the VBLANK pipe is a DRR display -- if it is make a call to populate drr_info.
 *
 * @param [in] dc: current dc state
 * @param [in] context: new dc state
 * @param [in] cmd: DMUB cmd to be populated with SubVP info
 * @param [in] vblank_pipe: pipe_ctx for the VBLANK pipe
 * @param [in] cmd_pipe_index: index for the pipe array in DMCUB SubVP cmd
 *
 * @return: void
 *
 * ***********************************************************************************************
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
        struct dc_state *context,
        union dmub_rb_cmd *cmd,
        struct pipe_ctx *vblank_pipe,
        uint8_t cmd_pipe_index)
{
    uint32_t i;
    struct pipe_ctx *pipe = NULL;
    struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
            &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

    // Find the SubVP pipe
    for (i = 0; i < dc->res_pool->pipe_count; i++) {
        pipe = &context->res_ctx.pipe_ctx[i];

        // We check for master pipe, but it shouldn't matter since we only need
        // the pipe for timing info (stream should be same for any pipe splits)
        if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
            continue;

        // Find the SubVP pipe
        if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
            break;
    }

    pipe_data->mode = VBLANK;
    pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
    pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
            vblank_pipe->stream->timing.v_front_porch;
    pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
    pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
    pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
    pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
    pipe_data->pipe_config.vblank_data.vblank_end =
            vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

    if (vblank_pipe->stream->ignore_msa_timing_param)
        populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data);
}

/**
 * ***********************************************************************************************
 * update_subvp_prefetch_end_to_mall_start: Helper for SubVP + SubVP case
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the microschedule for both
 * SubVP pipes. In order for this to work correctly, the MALL REGION of both SubVP pipes must
 * start at the same time. This function lengthens the prefetch end to mall start delay of the
 * SubVP pipe that has the shorter prefetch so that both MALL REGIONs will start at the same time.
 *
 * @param [in] dc: current dc state
 * @param [in] context: new dc state
 * @param [in] cmd: DMUB cmd to be populated with SubVP info
 * @param [in] subvp_pipes: Array of SubVP pipes (should always be length 2)
 *
 * @return: void
 *
 * ***********************************************************************************************
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
        struct dc_state *context,
        union dmub_rb_cmd *cmd,
        struct pipe_ctx *subvp_pipes[])
{
    uint32_t subvp0_prefetch_us = 0;
    uint32_t subvp1_prefetch_us = 0;
    uint32_t prefetch_delta_us = 0;
    struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing;
    struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
    struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

    subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total,
            (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
    subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total,
            (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);

    // Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
    // should increase its prefetch time to match the other
    if (subvp0_prefetch_us > subvp1_prefetch_us) {
        pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
        prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
        pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
                div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
                        (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1),
                        (int64_t)phantom_timing1->h_total);
    } else if (subvp1_prefetch_us > subvp0_prefetch_us) {
        pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
        prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
        pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
                div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
                        (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1),
                        (int64_t)phantom_timing0->h_total);
    }
}
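
Concretely, with illustrative prefetch times of subvp0_prefetch_us = 300 and subvp1_prefetch_us = 250, pipe 1 is padded by prefetch_delta_us = 50 so both MALL regions begin together; the "+ h_total - 1" term then forces the conversion to whole phantom-timing lines to round up:

    /* extra_lines = ceil((subvp_prefetch_end_to_mall_start_us + 50) * 1e-6
     *                    * pix_clk_hz / h_total)
     */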

/**
 * ***************************************************************************************
 * populate_subvp_cmd_pipe_info: Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information required to
 * calculate the microschedule for the SubVP pipe is stored in the pipe_data of the DMCUB
 * SubVP command.
 *
 * @param [in] dc: current dc state
 * @param [in] context: new dc state
 * @param [in] cmd: DMUB cmd to be populated with SubVP info
 * @param [in] subvp_pipe: pipe_ctx for the SubVP pipe
 * @param [in] cmd_pipe_index: index for the pipe array in DMCUB SubVP cmd
 *
 * @return: void
 *
 * ***************************************************************************************
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
        struct dc_state *context,
        union dmub_rb_cmd *cmd,
        struct pipe_ctx *subvp_pipe,
        uint8_t cmd_pipe_index)
{
    uint32_t j;
    struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
            &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
    struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
    struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;

    pipe_data->mode = SUBVP;
    pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
    pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
    pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
    pipe_data->pipe_config.subvp_data.main_vblank_start =
            main_timing->v_total - main_timing->v_front_porch;
    pipe_data->pipe_config.subvp_data.main_vblank_end =
            main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
    pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
    pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;

    // Prefetch lines is equal to VACTIVE + BP + VSYNC
    pipe_data->pipe_config.subvp_data.prefetch_lines =
            phantom_timing->v_total - phantom_timing->v_front_porch;

    // Round up
    pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
            div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) *
                    (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
                    (int64_t)phantom_timing->h_total);
    pipe_data->pipe_config.subvp_data.processing_delay_lines =
            div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) *
                    (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
                    (int64_t)phantom_timing->h_total);
    // Find phantom pipe index based on phantom stream
    for (j = 0; j < dc->res_pool->pipe_count; j++) {
        struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

        if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
            pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->pipe_idx;
            break;
        }
    }
}

/**
 * ***************************************************************************************
 * dc_dmub_setup_subvp_dmub_command: Populate the DMCUB SubVP command
 *
 * This function loops through each pipe and populates the DMUB
 * SubVP CMD info based on the pipe (e.g. SubVP, VBLANK).
 *
 * @param [in] dc: current dc state
 * @param [in] context: new dc state
 * @param [in] cmd: DMUB cmd to be populated with SubVP info
 *
 * @return: void
 *
 * ***************************************************************************************
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
        struct dc_state *context,
        bool enable)
{
    uint8_t cmd_pipe_index = 0;
    uint32_t i, pipe_idx;
    uint8_t subvp_count = 0;
    union dmub_rb_cmd cmd;
    struct pipe_ctx *subvp_pipes[2];
    uint32_t wm_val_refclk = 0;

    memset(&cmd, 0, sizeof(cmd));
    // FW command for SUBVP
    cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
    cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
    cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
            sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

    for (i = 0; i < dc->res_pool->pipe_count; i++) {
        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

        if (!pipe->stream)
            continue;

        if (pipe->plane_state && !pipe->top_pipe &&
                pipe->stream->mall_stream_config.type == SUBVP_MAIN)
            subvp_pipes[subvp_count++] = pipe;
    }

    if (enable) {
        // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
        for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
            struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

            if (!pipe->stream)
                continue;

            if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream &&
                    pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
                populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
            } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
                // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
                // we run through DML without calculating "natural" P-state support
                populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);

            }
            pipe_idx++;
        }
        if (subvp_count == 2) {
            update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
        }
        cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
        cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

        // Store the original watermark value for this SubVP config so we can lower it when the
        // MCLK switch starts
        wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
                dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 / 1000;

        cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
    }
    dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
    dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
    dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
#endif
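
A hedged sketch of how a caller outside this hunk might drive the new entry point around a state commit; the surrounding policy check is illustrative, not taken from this patch:

    /* Illustrative only: program SubVP handling into DMCUB for the new
     * state, or tear it down when SubVP is forced off via debug options.
     */
    if (dc->debug.force_disable_subvp)
        dc_dmub_setup_subvp_dmub_command(dc, context, false);
    else
        dc_dmub_setup_subvp_dmub_command(dc, context, true);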

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
{
    if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)

@ -72,6 +72,10 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu

void dc_dmub_trace_event_control(struct dc *dc, bool enable);

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max);

bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context);

void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub);
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);

@ -784,6 +784,8 @@ struct dc_crtc_timing {

    uint32_t vic;
    uint32_t hdmi_vic;
    uint32_t rid;
    uint32_t fr_index;
    enum dc_timing_3d_format timing_3d_format;
    enum dc_color_depth display_color_depth;
    enum dc_pixel_encoding pixel_encoding;

@ -320,6 +320,8 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
        const struct dc_stream_state *stream, struct psr_config *psr_config,
        struct psr_context *psr_context);

bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable);

void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency);

void dc_link_blank_all_dp_displays(struct dc *dc);

@ -145,7 +145,6 @@ struct test_pattern {
    unsigned int cust_pattern_size;
};

#ifdef CONFIG_DRM_AMD_DC_DCN
#define SUBVP_DRR_MARGIN_US 500 // 500us for DRR margin (SubVP + DRR)

enum mall_stream_type {
@ -161,7 +160,6 @@ struct mall_stream_config {
    enum mall_stream_type type;
    struct dc_stream_state *paired_stream; // master / slave stream
};
#endif

struct dc_stream_state {
    // sink is deprecated, new code should not reference
@ -277,9 +275,9 @@ struct dc_stream_state {

    bool has_non_synchronizable_pclk;
    bool vblank_synchronized;
#ifdef CONFIG_DRM_AMD_DC_DCN
    struct mall_stream_config mall_stream_config;
#endif

    bool odm_2to1_policy_applied;
};

#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@ -327,6 +325,25 @@ bool dc_is_stream_unchanged(
bool dc_is_stream_scaling_unchanged(
    struct dc_stream_state *old_stream, struct dc_stream_state *stream);

/*
 * Setup stream attributes if no stream updates are provided
 * there will be no impact on the stream parameters
 *
 * Set up surface attributes and associate to a stream
 * The surfaces parameter is an absolute set of all surface active for the stream.
 * If no surfaces are provided, the stream will be blanked; no memory read.
 * Any flip related attribute changes must be done through this interface.
 *
 * After this call:
 * Surfaces attributes are programmed and configured to be composed into stream.
 * This does not trigger a flip. No surface address is programmed.
 *
 */
bool dc_update_planes_and_stream(struct dc *dc,
        struct dc_surface_update *surface_updates, int surface_count,
        struct dc_stream_state *dc_stream,
        struct dc_stream_update *stream_update);

/*
 * Set up surface attributes and associate to a stream
 * The surfaces parameter is an absolute set of all surface active for the stream.
@ -337,7 +354,6 @@ bool dc_is_stream_scaling_unchanged(
 * Surfaces attributes are programmed and configured to be composed into stream.
 * This does not trigger a flip. No surface address is programmed.
 */

void dc_commit_updates_for_stream(struct dc *dc,
        struct dc_surface_update *srf_updates,
        int surface_count,

@ -196,7 +196,10 @@ struct dc_panel_patch {
    unsigned int disable_fec;
    unsigned int extra_t3_ms;
    unsigned int max_dsc_target_bpp_limit;
    unsigned int embedded_tiled_slave;
    unsigned int disable_fams;
    unsigned int skip_avmute;
    unsigned int mst_start_top_delay;
};

struct dc_edid_caps {
@ -277,6 +280,8 @@ enum dc_timing_source {
    TIMING_SOURCE_EDID_CEA_SVD,
    TIMING_SOURCE_EDID_CVT_3BYTE,
    TIMING_SOURCE_EDID_4BYTE,
    TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB,
    TIMING_SOURCE_EDID_CEA_RID,
    TIMING_SOURCE_VBIOS,
    TIMING_SOURCE_CV,
    TIMING_SOURCE_TV,
@ -871,7 +876,8 @@ struct dc_context {
#ifdef CONFIG_DRM_AMD_DC_HDCP
    struct cp_psp cp_psp;
#endif

    uint32_t *dcn_reg_offsets;
    uint32_t *nbio_reg_offsets;
};

/* DSC DPCD capabilities */

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "reg_helper.h"
#include "dce_audio.h"
#include "dce/dce_11_0_d.h"
@ -486,6 +484,17 @@ void dce_aud_az_configure(

    AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, value);

    /* ACP Data - Supports AI */
    value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA);

    set_reg_field_value(
        value,
        audio_info->flags.info.SUPPORT_AI,
        AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA,
        SUPPORTS_AI);

    AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA, value);

    /* Audio Descriptors */
    /* pass through all formats */
    for (format_index = 0; format_index < AUDIO_FORMAT_CODE_COUNT;

@ -23,9 +23,6 @@
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dm_services.h"
#include "core_types.h"
#include "dce_aux.h"
@ -572,6 +569,11 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,

    memset(&aux_req, 0, sizeof(aux_req));

    if (ddc_pin == NULL) {
        *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
        return -1;
    }

    aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
    if (!acquire(aux_engine, ddc_pin)) {
        *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"

@ -157,13 +157,16 @@
    SRII(PIXEL_RATE_CNTL, OTG, 0),\
    SRII(PIXEL_RATE_CNTL, OTG, 1)

#define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
    CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
    CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
    CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
    CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)

#define CS_COMMON_MASK_SH_LIST_DCN3_1_4(mask_sh)\
    CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh),\
    CS_SF(OTG0_PIXEL_RATE_CNTL, PIPE0_DTO_SRC_SEL, mask_sh),

#define CS_COMMON_MASK_SH_LIST_DCN3_2(mask_sh)\
    CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh),\
    CS_SF(OTG0_PIXEL_RATE_CNTL, PIPE0_DTO_SRC_SEL, mask_sh)

@ -23,9 +23,6 @@
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "core_types.h"
#include "link_encoder.h"
#include "dce_dmcu.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/delay.h>

#include "dce_i2c.h"
#include "dce_i2c_sw.h"
#include "include/gpio_service_interface.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dce_ipp.h"
#include "reg_helper.h"
#include "dm_services.h"

@ -23,9 +23,6 @@
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "reg_helper.h"

#include "core_types.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"
#include "basics/conversion.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/delay.h>

#include "dc_bios_types.h"
#include "dce_stream_encoder.h"
#include "reg_helper.h"
@ -33,7 +31,6 @@
#define DC_LOGGER \
    enc110->base.ctx->logger

#define REG(reg)\
    (enc110->regs->reg)

@ -635,6 +632,8 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
            HDMI_GC_SEND, 1,
            HDMI_NULL_SEND, 1);

    REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0);

    /* following belongs to audio */
    REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);

@ -115,7 +115,7 @@
#define SE_SF(reg_name, field_name, post_fix)\
    .field_name = reg_name ## __ ## field_name ## post_fix

#define SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\
    SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
    SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
    SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
@ -140,6 +140,7 @@
    SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
    SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
    SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
    SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
    SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
    SE_SF(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
    SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
@ -202,10 +203,7 @@
    SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
    SE_SF(DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh)

#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\
    SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)

#define SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)\
#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
    SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
    SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
    SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
@ -227,6 +225,7 @@
    SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
    SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
    SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
    SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
    SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
    SE_SF(DIG0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
    SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
@ -288,9 +287,6 @@
    SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
    SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh)

#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
    SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)

#define SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\
    SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
    SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
@ -414,6 +410,7 @@ struct dce_stream_encoder_shift {
    uint8_t HDMI_GC_SEND;
    uint8_t HDMI_NULL_SEND;
    uint8_t HDMI_DATA_SCRAMBLE_EN;
    uint8_t HDMI_ACP_SEND;
    uint8_t HDMI_AUDIO_INFO_SEND;
    uint8_t AFMT_AUDIO_INFO_UPDATE;
    uint8_t HDMI_AUDIO_INFO_LINE;
@ -545,6 +542,7 @@ struct dce_stream_encoder_mask {
    uint32_t HDMI_GC_SEND;
    uint32_t HDMI_NULL_SEND;
    uint32_t HDMI_DATA_SCRAMBLE_EN;
    uint32_t HDMI_ACP_SEND;
    uint32_t HDMI_AUDIO_INFO_SEND;
    uint32_t AFMT_AUDIO_INFO_UPDATE;
    uint32_t HDMI_AUDIO_INFO_LINE;

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"

#include "link_encoder.h"

@ -23,9 +23,6 @@
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dm_services.h"

#include "dce/dce_11_0_d.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/delay.h>

#include "dm_services.h"
#include "dc.h"
#include "dc_bios_types.h"
@ -2173,16 +2171,14 @@ static void dce110_setup_audio_dto(
    if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) {
        struct dtbclk_dto_params dto_params = {0};

        dc->res_pool->dccg->funcs->set_audio_dtbclk_dto(
            dc->res_pool->dccg, &dto_params);

        pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
            pipe_ctx->stream_res.audio,
            pipe_ctx->stream->signal,
            &audio_output.crtc_info,
            &audio_output.pll_info);

        /* disable audio DTBCLK DTO */
        dc->res_pool->dccg->funcs->set_audio_dtbclk_dto(
            dc->res_pool->dccg, &dto_params);

    } else
        pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
            pipe_ctx->stream_res.audio,

@ -23,8 +23,6 @@
 *
 */

#include <linux/delay.h>

#include "dm_services.h"

/* include DCE11 register header files */

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"

#include "link_encoder.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/delay.h>

#include "dce110_transform_v.h"
#include "dm_services.h"
#include "dc.h"

@ -23,9 +23,6 @@
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dm_services.h"

#include "dce/dce_11_2_d.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"

#include "link_encoder.h"

@ -24,8 +24,6 @@
 *
 */

#include <linux/slab.h>

#include "dm_services.h"

@ -23,8 +23,6 @@
 *
 */

#include <linux/slab.h>

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

@ -361,6 +361,8 @@ void dpp1_cnv_setup (
        select = INPUT_CSC_SELECT_ICSC;
        break;
    case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
        pixel_format = 22;
        break;
    case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
        pixel_format = 26; /* ARGB16161616_UNORM */
        break;

@ -23,8 +23,6 @@
 *
 */

#include <linux/delay.h>

#include "dm_services.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"

@ -278,6 +278,9 @@ void hubp1_program_pixel_format(
            SURFACE_PIXEL_FORMAT, 10);
        break;
    case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
        REG_UPDATE(DCSURF_SURFACE_CONFIG,
            SURFACE_PIXEL_FORMAT, 22);
        break;
    case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
        REG_UPDATE(DCSURF_SURFACE_CONFIG,
            SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
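
For orientation, the two 16-bpc cases added to the DPP and HUBP programming above agree on the hardware surface-format codes; in summary:

    /* SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 -> hw format 22
     * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 -> hw format 26 (ARGB16161616_UNORM,
     *                                           channels swapped via the crossbar)
     */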

@ -444,7 +444,7 @@ void dcn10_log_hw_state(struct dc *dc,

        struct link_enc_state s = {0};

        if (lenc->funcs->read_state) {
        if (lenc && lenc->funcs->read_state) {
            lenc->funcs->read_state(lenc, &s);
            DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
                i,
@ -1155,7 +1155,9 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
        return;

    mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
    if (opp != NULL)
    // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
    // so don't wait for MPCC_IDLE in the programming sequence
    if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
        opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

    dc->optimized_required = true;