Merge tag 'amd-drm-fixes-5.16-2021-12-29' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes
amd-drm-fixes-5.16-2021-12-29:

amdgpu:
- Fencing fix
- XGMI fix
- VCN regression fix
- IP discovery regression fixes
- Fix runpm documentation
- Suspend/resume fixes
- Yellow Carp display fixes
- MCLK power management fix

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211229155129.5789-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit aeeb82fd61
@@ -3166,6 +3166,12 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
 	switch (asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_HAINAN:
+#endif
+	case CHIP_TOPAZ:
+		/* chips with no display hardware */
+		return false;
 #if defined(CONFIG_DRM_AMD_DC)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
@@ -4461,7 +4467,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 				 struct amdgpu_reset_context *reset_context)
 {
-	int i, j, r = 0;
+	int i, r = 0;
 	struct amdgpu_job *job = NULL;
 	bool need_full_reset =
 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4483,15 +4489,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
 		/*clear job fence from fence drv to avoid force_completion
 		 *leave NULL and vm flush fence in fence drv */
-		for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
-			struct dma_fence *old, **ptr;
-
-			ptr = &ring->fence_drv.fences[j];
-			old = rcu_dereference_protected(*ptr, 1);
-			if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
-				RCU_INIT_POINTER(*ptr, NULL);
-			}
-		}
+		amdgpu_fence_driver_clear_job_fences(ring);
+
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 		amdgpu_fence_driver_force_completion(ring);
 	}
@@ -526,10 +526,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 	}
 }
 
+union gc_info {
+	struct gc_info_v1_0 v1;
+	struct gc_info_v2_0 v2;
+};
+
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 {
 	struct binary_header *bhdr;
-	struct gc_info_v1_0 *gc_info;
+	union gc_info *gc_info;
 
 	if (!adev->mman.discovery_bin) {
 		DRM_ERROR("ip discovery uninitialized\n");
@@ -537,28 +542,55 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 	}
 
 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
-	gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
+	gc_info = (union gc_info *)(adev->mman.discovery_bin +
 			le16_to_cpu(bhdr->table_list[GC].offset));
-
-	adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
-	adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
-					      le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
-	adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
-	adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
-	adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
-	adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
-	adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
-	adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
-	adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
-	adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
-	adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
-	adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
-	adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
-	adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
-	adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
-					 le32_to_cpu(gc_info->gc_num_sa_per_se);
-	adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
-
+	switch (gc_info->v1.header.version_major) {
+	case 1:
+		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+						 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+		break;
+	case 2:
+		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+						 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+		break;
+	default:
+		dev_err(adev->dev,
+			"Unhandled GC info table %d.%d\n",
+			gc_info->v1.header.version_major,
+			gc_info->v1.header.version_minor);
+		return -EINVAL;
+	}
 	return 0;
 }
 
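The two hunks above are the IP-discovery regression fix: the GC table is no longer assumed to be v1, but read through a union and dispatched on the header's major version. The pattern in isolation, with illustrative names and the driver's le32_to_cpu() endian handling omitted (a sketch, not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    struct tbl_header { uint16_t version_major, version_minor; };
    struct tbl_v1 { struct tbl_header header; uint32_t num_se; };
    struct tbl_v2 { struct tbl_header header; uint32_t num_se, num_cu_per_sh; };
    /* every version begins with the same header, so peeking via v1 is safe */
    union tbl { struct tbl_v1 v1; struct tbl_v2 v2; };

    static int parse_tbl(const void *blob)
    {
            const union tbl *t = blob;

            switch (t->v1.header.version_major) {
            case 1:
                    printf("v1: num_se=%u\n", (unsigned)t->v1.num_se);
                    return 0;
            case 2:
                    printf("v2: num_se=%u, cu/sh=%u\n",
                           (unsigned)t->v2.num_se, (unsigned)t->v2.num_cu_per_sh);
                    return 0;
            default:
                    return -1; /* unknown layout: refuse rather than misread it */
            }
    }

Returning -EINVAL for unknown major versions, as the hunk does, is the safe default, since field offsets are not guaranteed to be stable across major revisions.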
@@ -328,10 +328,11 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
 
 /**
  * DOC: runpm (int)
- * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
- * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
+ * the dGPUs when they are idle if supported. The default is -1 (auto enable).
+ * Setting the value to 0 disables this functionality.
  */
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
 
 /**
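Because the parameter is registered via module_param_named(runpm, amdgpu_runtime_pm, int, 0444), the override can be given at module load or on the kernel command line; illustrative invocations (not part of the patch):

    # force-disable runtime power management for the dGPU
    modprobe amdgpu runpm=0

    # the same setting from the kernel command line
    amdgpu.runpm=0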
@@ -2153,7 +2154,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
 	adev->in_s3 = true;
 	r = amdgpu_device_suspend(drm_dev, true);
 	adev->in_s3 = false;
-
+	if (r)
+		return r;
+	if (!adev->in_s0ix)
+		r = amdgpu_asic_reset(adev);
 	return r;
 }
 
@@ -2234,12 +2238,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	if (amdgpu_device_supports_px(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
+	/*
+	 * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
+	 * proper cleanups and put itself into a state ready for PNP. That
+	 * can address some random resuming failure observed on BOCO capable
+	 * platforms.
+	 * TODO: this may be also needed for PX capable platform.
+	 */
+	if (amdgpu_device_supports_boco(drm_dev))
+		adev->mp1_state = PP_MP1_STATE_UNLOAD;
+
 	ret = amdgpu_device_suspend(drm_dev, false);
 	if (ret) {
 		adev->in_runpm = false;
+		if (amdgpu_device_supports_boco(drm_dev))
+			adev->mp1_state = PP_MP1_STATE_NONE;
 		return ret;
 	}
 
+	if (amdgpu_device_supports_boco(drm_dev))
+		adev->mp1_state = PP_MP1_STATE_NONE;
+
 	if (amdgpu_device_supports_px(drm_dev)) {
 		/* Only need to handle PCI state in the driver for ATPX
 		 * PCI core handles it for _PR3.
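For reference, the mp1_state values used above come from the power-play interface (paraphrased from kgd_pp_interface.h of this vintage; verify against your tree):

    enum pp_mp1_state {
            PP_MP1_STATE_NONE,      /* no special MP1 handling requested */
            PP_MP1_STATE_SHUTDOWN,
            PP_MP1_STATE_UNLOAD,    /* MP1 cleans up and parks itself for PNP */
            PP_MP1_STATE_RESET,
    };

Note the fix restores PP_MP1_STATE_NONE on both the error path and the success path, so a failed runtime suspend does not leave MP1 parked.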
@@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
  * Cast helper
  */
 static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
-	if (__f->base.ops == &amdgpu_fence_ops)
+	if (__f->base.ops == &amdgpu_fence_ops ||
+	    __f->base.ops == &amdgpu_job_fence_ops)
 		return __f;
 
 	return NULL;
@@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
 	}
 
 	seq = ++ring->fence_drv.sync_seq;
-	if (job != NULL && job->job_run_counter) {
+	if (job && job->job_run_counter) {
 		/* reinit seq for resubmitted jobs */
 		fence->seqno = seq;
 	} else {
-		dma_fence_init(fence, &amdgpu_fence_ops,
-			       &ring->fence_drv.lock,
-			       adev->fence_context + ring->idx,
-			       seq);
-	}
-
-	if (job != NULL) {
-		/* mark this fence has a parent job */
-		set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+		if (job)
+			dma_fence_init(fence, &amdgpu_job_fence_ops,
+				       &ring->fence_drv.lock,
+				       adev->fence_context + ring->idx, seq);
+		else
+			dma_fence_init(fence, &amdgpu_fence_ops,
+				       &ring->fence_drv.lock,
+				       adev->fence_context + ring->idx, seq);
 	}
 
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -620,6 +621,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 	}
 }
 
+/**
+ * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
+ *
+ * @ring: ring whose job-embedded fences are to be cleared
+ *
+ */
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+{
+	int i;
+	struct dma_fence *old, **ptr;
+
+	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+		ptr = &ring->fence_drv.fences[i];
+		old = rcu_dereference_protected(*ptr, 1);
+		if (old && old->ops == &amdgpu_job_fence_ops)
+			RCU_INIT_POINTER(*ptr, NULL);
+	}
+}
+
 /**
  * amdgpu_fence_driver_force_completion - force signal latest fence of ring
  *
@@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
-	struct amdgpu_ring *ring;
+	return (const char *)to_amdgpu_fence(f)->ring->name;
+}
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-		ring = to_amdgpu_ring(job->base.sched);
-	} else {
-		ring = to_amdgpu_fence(f)->ring;
-	}
-	return (const char *)ring->name;
+	return (const char *)to_amdgpu_ring(job->base.sched)->name;
 }
 
 /**
@@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
  */
 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
-	struct amdgpu_ring *ring;
+	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+	return true;
+}
 
-		ring = to_amdgpu_ring(job->base.sched);
-	} else {
-		ring = to_amdgpu_fence(f)->ring;
-	}
+/**
+ * amdgpu_job_fence_enable_signaling - enable signalling on job fence
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_enable_signaling above, but it
+ * only handles the job embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-	if (!timer_pending(&ring->fence_drv.fallback_timer))
-		amdgpu_fence_schedule_fallback(ring);
+	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
 
 	return true;
 }
@@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 {
 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 
-	if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-		/* free job if fence has a parent job */
-		struct amdgpu_job *job;
-
-		job = container_of(f, struct amdgpu_job, hw_fence);
-		kfree(job);
-	} else {
-		/* free fence_slab if it's separated fence*/
-		struct amdgpu_fence *fence;
-
-		fence = to_amdgpu_fence(f);
-		kmem_cache_free(amdgpu_fence_slab, fence);
-	}
+	/* free fence_slab if it's separated fence*/
+	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+}
+
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+	/* free job if fence has a parent job */
+	kfree(container_of(f, struct amdgpu_job, hw_fence));
 }
 
 /**
@@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_release above, but it
+ * only handles the job embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+	call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
 static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
 	.release = amdgpu_fence_release,
 };
 
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+	.get_driver_name = amdgpu_fence_get_driver_name,
+	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
+	.enable_signaling = amdgpu_job_fence_enable_signaling,
+	.release = amdgpu_job_fence_release,
+};
+
 /*
  * Fence debugfs
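The common thread in the fence changes above: job-embedded fences are no longer tagged with AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT; they get their own dma_fence_ops table, and identity is recovered by comparing f->ops. A minimal standalone sketch of the idiom with simplified types (not the driver's):

    struct fence;

    struct fence_ops {
            const char *(*timeline_name)(struct fence *f);
    };

    struct fence {
            const struct fence_ops *ops;
    };

    static const char *ring_name(struct fence *f) { return "ring"; }
    static const char *job_name(struct fence *f)  { return "ring-job"; }

    static const struct fence_ops plain_ops = { .timeline_name = ring_name };
    static const struct fence_ops job_ops   = { .timeline_name = job_name };

    /* the ops pointer itself is the type tag; no per-object flag bit needed */
    static int fence_is_job(const struct fence *f)
    {
            return f->ops == &job_ops;
    }

Each callback is then specialized for exactly one fence flavor, which removes the runtime flag tests from the hot paths and lets the flag bit be deleted, as the next two hunks do.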
@@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
-/* fence flag bit to indicate the face is embedded in job*/
-#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)
-
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
 #define AMDGPU_IB_POOL_SIZE	(1024 * 1024)
@@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
 	struct dma_fence **fences;
 };
 
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
@@ -246,6 +246,13 @@ static int vcn_v1_0_suspend(void *handle)
 {
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool idle_work_unexecuted;
+
+	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+	if (idle_work_unexecuted) {
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, false);
+	}
 
 	r = vcn_v1_0_hw_fini(adev);
 	if (r)
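The VCN fix hinges on the return value of cancel_delayed_work_sync(): it returns true when the work was still pending, i.e. the idle handler never ran, so the power-down that handler would have issued must be done here before hw_fini. A condensed restatement of the added logic:

    /* true => idle_work was cancelled before it could execute */
    if (cancel_delayed_work_sync(&adev->vcn.idle_work) && adev->pm.dpm_enabled)
            amdgpu_dpm_enable_uvd(adev, false); /* mirror the idle handler */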
@@ -158,6 +158,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 		union display_idle_optimization_u idle_info = { 0 };
 		idle_info.idle_info.df_request_disabled = 1;
 		idle_info.idle_info.phy_ref_clk_off = 1;
+		idle_info.idle_info.s0i2_rdy = 1;
 		dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
 		/* update power state */
 		clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -3945,12 +3945,9 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 	config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
 
 	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
 			pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
-		link_enc = pipe_ctx->stream->link->link_enc;
-		config.dio_output_type = pipe_ctx->stream->link->ep_type;
-		config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
 		if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
 			link_enc = pipe_ctx->stream->link->link_enc;
 		else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
@@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.get_clock = dcn10_get_clock,
 	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
 	.calc_vupdate_position = dcn10_calc_vupdate_position,
+	.power_down = dce110_power_down,
 	.set_backlight_level = dce110_set_backlight_level,
 	.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
 	.set_pipe = dce110_set_pipe,
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
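This hunk and the similar ones that follow (eight sites in all) flip the same debug default across the DCN resource files, replacing the split-avoid policies with dynamic pipe splitting. For orientation, the policy options as defined in dc.h around this release (paraphrased; verify against your tree):

    enum pipe_split_policy {
            MPC_SPLIT_DYNAMIC = 0,         /* driver decides when to split a plane */
            MPC_SPLIT_AVOID = 1,           /* never split */
            MPC_SPLIT_AVOID_MULT_DISP = 2, /* avoid splitting with multiple displays */
    };

MPC_SPLIT_DYNAMIC lets DC split a plane across two pipes when that helps, which is the default behavior being restored here.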
@@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
 	.min_disp_clk_khz = 100000,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.disable_clock_gate = true,
 	.disable_pplib_clock_request = true,
 	.disable_pplib_wm_range = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
@@ -101,6 +101,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
 	.z10_restore = dcn31_z10_restore,
 	.z10_save_init = dcn31_z10_save_init,
 	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+	.optimize_pwr_state = dcn21_optimize_pwr_state,
 	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
 	.update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
@@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {
 	clk_src_regs(3, D),
 	clk_src_regs(4, E)
 };
+/*pll_id being remapped in dmub, in driver it is logical instance*/
+static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
+	clk_src_regs(0, A),
+	clk_src_regs(1, B),
+	clk_src_regs(2, F),
+	clk_src_regs(3, G),
+	clk_src_regs(4, E)
+};
 
 static const struct dce110_clk_src_shift cs_shift = {
 	CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
@@ -994,7 +1002,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = false,
-	.pipe_split_policy = MPC_SPLIT_AVOID,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
|
|||||||
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
||||||
CLOCK_SOURCE_COMBO_PHY_PLL1,
|
CLOCK_SOURCE_COMBO_PHY_PLL1,
|
||||||
&clk_src_regs[1], false);
|
&clk_src_regs[1], false);
|
||||||
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
|
/*move phypllx_pixclk_resync to dmub next*/
|
||||||
|
if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
|
||||||
|
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
|
||||||
|
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
||||||
|
CLOCK_SOURCE_COMBO_PHY_PLL2,
|
||||||
|
&clk_src_regs_b0[2], false);
|
||||||
|
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
|
||||||
|
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
||||||
|
CLOCK_SOURCE_COMBO_PHY_PLL3,
|
||||||
|
&clk_src_regs_b0[3], false);
|
||||||
|
} else {
|
||||||
|
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
|
||||||
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
||||||
CLOCK_SOURCE_COMBO_PHY_PLL2,
|
CLOCK_SOURCE_COMBO_PHY_PLL2,
|
||||||
&clk_src_regs[2], false);
|
&clk_src_regs[2], false);
|
||||||
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
|
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
|
||||||
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
||||||
CLOCK_SOURCE_COMBO_PHY_PLL3,
|
CLOCK_SOURCE_COMBO_PHY_PLL3,
|
||||||
&clk_src_regs[3], false);
|
&clk_src_regs[3], false);
|
||||||
|
}
|
||||||
|
|
||||||
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
|
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
|
||||||
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
dcn30_clock_source_create(ctx, ctx->dc_bios,
|
||||||
CLOCK_SOURCE_COMBO_PHY_PLL4,
|
CLOCK_SOURCE_COMBO_PHY_PLL4,
|
||||||
|
@@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool(
 		const struct dc_init_data *init_data,
 		struct dc *dc);
 
+/*temp: B0 specific before switch to dcn313 headers*/
+#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL			0x007e
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX		1
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL			0x005f
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX		1
+
+//PHYPLLF_PIXCLK_RESYNC_CNTL
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT		0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT	0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT		0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT			0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT		0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK			0x00000001L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK		0x00000002L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK			0x00000030L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK				0x00000100L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK		0x00000200L
+
+//PHYPLLG_PIXCLK_RESYNC_CNTL
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT		0x0
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT	0x1
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT		0x4
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT			0x8
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT		0x9
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK			0x00000001L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK		0x00000002L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK			0x00000030L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK				0x00000100L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK		0x00000200L
+#endif
 #endif /* _DCN31_RESOURCE_H_ */
@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
 	uint32_t gc_num_gl2a;
 };
 
+struct gc_info_v1_1 {
+	struct gpu_info_header header;
+
+	uint32_t gc_num_se;
+	uint32_t gc_num_wgp0_per_sa;
+	uint32_t gc_num_wgp1_per_sa;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_gl2c;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_sa_per_se;
+	uint32_t gc_num_packer_per_sc;
+	uint32_t gc_num_gl2a;
+	uint32_t gc_num_tcp_per_sa;
+	uint32_t gc_num_sdp_interface;
+	uint32_t gc_num_tcps;
+};
+
+struct gc_info_v2_0 {
+	struct gpu_info_header header;
+
+	uint32_t gc_num_se;
+	uint32_t gc_num_cu_per_sh;
+	uint32_t gc_num_sh_per_se;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_tccs;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_packer_per_sc;
+};
+
 typedef struct harvest_info_header {
 	uint32_t signature; /* Table Signature */
 	uint32_t version;   /* Table Version */
@@ -1568,9 +1568,7 @@ static int smu_suspend(void *handle)
 
 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-	/* skip CGPG when in S0ix */
-	if (smu->is_apu && !adev->in_s0ix)
-		smu_set_gfx_cgpg(&adev->smu, false);
+	smu_set_gfx_cgpg(&adev->smu, false);
 
 	return 0;
 }
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (smu->is_apu)
|
smu_set_gfx_cgpg(&adev->smu, true);
|
||||||
smu_set_gfx_cgpg(&adev->smu, true);
|
|
||||||
|
|
||||||
smu->disable_uclk_switch = 0;
|
smu->disable_uclk_switch = 0;
|
||||||
|
|
||||||
|
@@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
 
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 {
-	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+	/* So far SMU12 is only implemented for the Renoir series, so no APU check is needed here. */
+	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
 		return 0;
 
 	return smu_cmn_send_smc_msg_with_param(smu,
@@ -1621,7 +1621,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
 	return smu_cmn_send_smc_msg_with_param(smu,
 					SMU_MSG_GmiPwrDnControl,
-					en ? 1 : 0,
+					en ? 0 : 1,
 					NULL);
 }
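The Aldebaran XGMI fix is a one-line argument inversion: judging from the change, SMU_MSG_GmiPwrDnControl treats its argument as a "disallow" flag (0 = allow link power down, 1 = keep the link up), so enabling power down must send 0:

    /* en == true means: permit XGMI link power down */
    uint32_t arg = en ? 0 : 1;

The previous en ? 1 : 0 mapping did the opposite of what callers asked for.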