Merge tag 'amd-drm-next-5.15-2021-08-20' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.15-2021-08-20:

amdgpu:
- embed hw fence into job
- Misc SMU fixes
- PSP TA code cleanup
- RAS fixes
- PWM fan speed fixes
- DC workqueue cleanups
- SR-IOV fixes
- gfxoff delayed work fix
- Pin domain check fix

amdkfd:
- SVM fixes

radeon:
- Code cleanup

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210820172335.4190-1-alexander.deucher@amd.com
commit 697b6e28d0
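The headline change in this pull is embedding the hardware fence directly in struct amdgpu_job (see the amdgpu_fence.c and amdgpu_job.c hunks below). The following standalone C sketch is illustrative only — the "toy_" names are made up and are not the amdgpu API — but it shows the core idea: the job owns its fence, offsetof()/container_of()-style arithmetic recovers the job from the fence, and a flag bit tells the release path whether it is freeing an embedded fence (free the whole job) or a separately allocated one.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
	unsigned long flags;
	unsigned int seqno;
};

#define TOY_FENCE_EMBED_IN_JOB_BIT 0

struct toy_job {
	int id;
	struct toy_fence hw_fence; /* embedded: lifetime follows the job */
};

/* Recover the owning job from an embedded fence. */
static struct toy_job *job_from_fence(struct toy_fence *f)
{
	return (struct toy_job *)((char *)f - offsetof(struct toy_job, hw_fence));
}

static void fence_release(struct toy_fence *f)
{
	if (f->flags & (1UL << TOY_FENCE_EMBED_IN_JOB_BIT)) {
		/* embedded: freeing the fence means freeing the job */
		free(job_from_fence(f));
	} else {
		/* separate fence: free just the fence object */
		free(f);
	}
}

int main(void)
{
	struct toy_job *job = malloc(sizeof(*job));

	if (!job)
		return 1;
	job->id = 42;
	job->hw_fence.flags = 1UL << TOY_FENCE_EMBED_IN_JOB_BIT;
	job->hw_fence.seqno = 1;
	printf("job %d owns fence seqno %u\n",
	       job_from_fence(&job->hw_fence)->id, job->hw_fence.seqno);
	fence_release(&job->hw_fence);
	return 0;
}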
@ -256,7 +256,6 @@ config DRM_AMDGPU
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select CHASH
help
Choose this option if you have a recent AMD Radeon graphics card.

@ -1271,6 +1271,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);

#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));

#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))

/* Common functions */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@ -714,7 +714,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
ret = dma_fence_wait(f, false);

err_ib_sched:
dma_fence_put(f);
amdgpu_job_free(job);
err:
return ret;
@ -560,6 +560,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
type = SAVE_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
@ -754,6 +757,33 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}

static void program_trap_handler_settings(struct kgd_dev *kgd,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);

lock_srbm(kgd, 0, 0, 0, vmid);

/*
* Program TBA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
lower_32_bits(tba_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
upper_32_bits(tba_addr >> 8) |
(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));

/*
* Program TMA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
lower_32_bits(tma_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));

unlock_srbm(kgd);
}

const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@ -774,4 +804,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.program_trap_handler_settings = program_trap_handler_settings,
};
@ -537,6 +537,9 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
type = SAVE_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
@ -658,6 +661,33 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}

static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);

lock_srbm(kgd, 0, 0, 0, vmid);

/*
* Program TBA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
lower_32_bits(tba_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
upper_32_bits(tba_addr >> 8) |
(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));

/*
* Program TMA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
lower_32_bits(tma_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));

unlock_srbm(kgd);
}

#if 0
uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
uint32_t trap_debug_wave_launch_mode,
@ -820,6 +850,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.address_watch_get_offset = address_watch_get_offset_v10_3,
.get_atc_vmid_pasid_mapping_info = NULL,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
.program_trap_handler_settings = program_trap_handler_settings_v10_3,
#if 0
.enable_debug_trap = enable_debug_trap_v10_3,
.disable_debug_trap = disable_debug_trap_v10_3,
@ -42,7 +42,8 @@
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
RESET_WAVES
RESET_WAVES,
SAVE_WAVES
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
@ -566,6 +567,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
type = SAVE_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
@ -878,6 +882,32 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
adev->gfx.cu_info.max_waves_per_simd;
}

static void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);

lock_srbm(kgd, 0, 0, 0, vmid);

/*
* Program TBA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
lower_32_bits(tba_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
upper_32_bits(tba_addr >> 8));

/*
* Program TMA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
lower_32_bits(tma_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));

unlock_srbm(kgd);
}

const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@ -899,4 +929,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
};
@ -1414,7 +1414,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
if (preempted && job->fence == fence)
if (preempted && (&job->hw_fence) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@ -2829,12 +2829,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);

mutex_lock(&adev->gfx.gfx_off_mutex);
if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
adev->gfx.gfx_off_state = true;
}
mutex_unlock(&adev->gfx.gfx_off_mutex);
WARN_ON_ONCE(adev->gfx.gfx_off_state);
WARN_ON_ONCE(adev->gfx.gfx_off_req_count);

if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
adev->gfx.gfx_off_state = true;
}

/**
@ -3826,7 +3825,10 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
if (adev->mman.initialized) {
flush_delayed_work(&adev->mman.bdev.wq);
ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
}
adev->shutdown = true;

/* make sure IB test finished before entering exclusive mode
@ -4448,7 +4450,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
int i, r = 0;
int i, j, r = 0;
struct amdgpu_job *job = NULL;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@ -4472,6 +4474,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!ring || !ring->sched.thread)
continue;

/* Clear job fences from fence drv to avoid force_completion,
* leaving only NULL and vm flush fences in fence drv */
for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
struct dma_fence *old, **ptr;

ptr = &ring->fence_drv.fences[j];
old = rcu_dereference_protected(*ptr, 1);
if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
RCU_INIT_POINTER(*ptr, NULL);
}
}
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
ip->major, ip->minor,
ip->revision);

if (le16_to_cpu(ip->hw_id) == VCN_HWID)
adev->vcn.num_vcn_inst++;

for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
struct harvest_table *harvest_info;
int i;
int i, vcn_harvest_count = 0;

bhdr = (struct binary_header *)adev->mman.discovery_bin;
harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)

switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
vcn_harvest_count++;
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
break;
}
}
if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
}
}

int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@ -273,9 +273,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
return 0;

out:
if (abo) {

}
if (fb && ret) {
drm_gem_object_put(gobj);
drm_framebuffer_unregister_private(fb);
@ -129,30 +129,50 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
*
* @ring: ring the fence is associated with
* @f: resulting fence object
* @job: job the fence is embedded in
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
unsigned flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
struct dma_fence *fence;
struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;

fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if (fence == NULL)
return -ENOMEM;
if (job == NULL) {
/* create a separate hw fence */
am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
if (am_fence == NULL)
return -ENOMEM;
fence = &am_fence->base;
am_fence->ring = ring;
} else {
/* use the job-embedded fence */
fence = &job->hw_fence;
}

seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
dma_fence_init(&fence->base, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx,
seq);
if (job != NULL && job->job_run_counter) {
/* reinit seq for resubmitted jobs */
fence->seqno = seq;
} else {
dma_fence_init(fence, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx,
seq);
}

if (job != NULL) {
/* mark that this fence has a parent job */
set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
}

amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
@ -175,9 +195,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
rcu_assign_pointer(*ptr, dma_fence_get(fence));

*f = &fence->base;
*f = fence;

return 0;
}
@ -621,8 +641,16 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
struct amdgpu_ring *ring;

if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

ring = to_amdgpu_ring(job->base.sched);
} else {
ring = to_amdgpu_fence(f)->ring;
}
return (const char *)ring->name;
}

/**
@ -635,13 +663,20 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
struct amdgpu_ring *ring;

if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

ring = to_amdgpu_ring(job->base.sched);
} else {
ring = to_amdgpu_fence(f)->ring;
}

if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);

DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);

return true;
}
@ -656,8 +691,20 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);

if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
/* free the job if the fence has a parent job */
struct amdgpu_job *job;

job = container_of(f, struct amdgpu_job, hw_fence);
kfree(job);
} else {
/* free from the fence slab if it's a separate fence */
struct amdgpu_fence *fence;

fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
}

/**
@ -680,6 +727,7 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};


/*
* Fence debugfs
*/
@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)

mutex_lock(&adev->gfx.gfx_off_mutex);

if (!enable)
adev->gfx.gfx_off_req_count++;
else if (adev->gfx.gfx_off_req_count > 0)
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
* Note that the bug may be in a different caller than the one which triggers the
* WARN_ON_ONCE.
*/
if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
goto unlock;

adev->gfx.gfx_off_req_count--;

if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
} else if (!enable && adev->gfx.gfx_off_state) {
if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
adev->gfx.gfx_off_state = false;
if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
} else {
if (adev->gfx.gfx_off_req_count == 0) {
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

if (adev->gfx.funcs->init_spm_golden) {
dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
amdgpu_gfx_init_spm_golden(adev);
if (adev->gfx.gfx_off_state &&
!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
adev->gfx.gfx_off_state = false;

if (adev->gfx.funcs->init_spm_golden) {
dev_dbg(adev->dev,
"GFXOFF is disabled, re-init SPM golden settings\n");
amdgpu_gfx_init_spm_golden(adev);
}
}
}

adev->gfx.gfx_off_req_count++;
}

unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}

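A brief aside on the gfx_off_ctrl rework above: disable requests now increment gfx_off_req_count and enable requests decrement it, the WARN_ON_ONCE catches unbalanced callers, and GFXOFF is only scheduled once the count drops back to zero. The following is a minimal model of that counting discipline — illustrative toy code only, not the driver's API:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_gfx {
	unsigned int off_req_count; /* outstanding "keep GFX on" requests */
	bool off_state;             /* true while GFXOFF is engaged */
};

static void toy_gfx_off_ctrl(struct toy_gfx *g, bool enable)
{
	if (enable) {
		/* a count of 0 here would mean an unbalanced caller */
		assert(g->off_req_count > 0);
		if (--g->off_req_count == 0 && !g->off_state) {
			g->off_state = true; /* stands in for the delayed work */
			puts("GFXOFF armed");
		}
	} else {
		if (g->off_req_count++ == 0 && g->off_state) {
			g->off_state = false;
			puts("GFXOFF cancelled");
		}
	}
}

int main(void)
{
	struct toy_gfx g = { .off_req_count = 1, .off_state = false };

	toy_gfx_off_ctrl(&g, true);  /* 1 -> 0: arm GFXOFF */
	toy_gfx_off_ctrl(&g, false); /* 0 -> 1: wake GFX back up */
	toy_gfx_off_ctrl(&g, true);  /* balanced again */
	return 0;
}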
@ -615,7 +629,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->gfx.ras_if->sub_block_index = 0;
strcpy(adev->gfx.ras_if->name, "gfx");
}
fs_info.head = ih_info.head = *adev->gfx.ras_if;
r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
@ -41,7 +41,6 @@ int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev)
adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP;
adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->hdp.ras_if->sub_block_index = 0;
strcpy(adev->hdp.ras_if->name, "hdp");
}
ih_info.head = fs_info.head = *adev->hdp.ras_if;
r = amdgpu_ras_late_init(adev, adev->hdp.ras_if,
@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
void
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
{
u8 val;
u8 val = 0;

if (!amdgpu_connector->router.ddc_valid)
return;
@ -262,7 +262,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}

r = amdgpu_fence_emit(ring, f, fence_flags);
r = amdgpu_fence_emit(ring, f, job, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@ -127,11 +127,16 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
struct dma_fence *hw_fence;
unsigned i;

/* use sched fence if available */
f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
if (job->hw_fence.ops == NULL)
hw_fence = job->external_hw_fence;
else
hw_fence = &job->hw_fence;

/* use sched fence if available */
f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
@ -142,20 +147,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)

drm_sched_job_cleanup(s_job);

dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
kfree(job);

/* only put the hw fence if the job has an embedded fence */
if (job->hw_fence.ops != NULL)
dma_fence_put(&job->hw_fence);
else
kfree(job);
}

void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);

dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
kfree(job);

/* only put the hw fence if the job has an embedded fence */
if (job->hw_fence.ops != NULL)
dma_fence_put(&job->hw_fence);
else
kfree(job);
}

int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@ -184,11 +196,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,

job->base.sched = &ring->sched;
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
job->fence = dma_fence_get(*fence);
/* record external_hw_fence for direct submit */
job->external_hw_fence = dma_fence_get(*fence);
if (r)
return r;

amdgpu_job_free(job);
dma_fence_put(*fence);

return 0;
}

@ -246,10 +261,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
}
/* if gpu reset, hw fence will be replaced here */
dma_fence_put(job->fence);
job->fence = dma_fence_get(fence);

if (!job->job_run_counter)
dma_fence_get(fence);
else if (finished->error < 0)
dma_fence_put(&job->hw_fence);
job->job_run_counter++;
amdgpu_job_free_resources(job);

fence = r ? ERR_PTR(r) : fence;
@ -46,7 +46,8 @@ struct amdgpu_job {
struct amdgpu_sync sync;
struct amdgpu_sync sched_sync;
struct amdgpu_ib *ibs;
struct dma_fence *fence; /* the hw fence */
struct dma_fence hw_fence;
struct dma_fence *external_hw_fence;
uint32_t preamble_status;
uint32_t preemption_status;
uint32_t num_ibs;
@ -62,6 +63,9 @@ struct amdgpu_job {
/* user fence handling */
uint64_t uf_addr;
uint64_t uf_sequence;

/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
};

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@ -341,27 +341,27 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
switch (query_fw->index) {
case TA_FW_TYPE_PSP_XGMI:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_xgmi_ucode_version;
fw_info->feature = adev->psp.xgmi.feature_version;
break;
case TA_FW_TYPE_PSP_RAS:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_ras_ucode_version;
fw_info->feature = adev->psp.ras.feature_version;
break;
case TA_FW_TYPE_PSP_HDCP:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_hdcp_ucode_version;
fw_info->feature = adev->psp.hdcp.feature_version;
break;
case TA_FW_TYPE_PSP_DTM:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_dtm_ucode_version;
fw_info->feature = adev->psp.dtm.feature_version;
break;
case TA_FW_TYPE_PSP_RAP:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_rap_ucode_version;
fw_info->feature = adev->psp.rap.feature_version;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
fw_info->feature = adev->psp.securedisplay.feature_version;
break;
default:
return -EINVAL;
@ -378,8 +378,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = adev->psp.sos.feature_version;
break;
case AMDGPU_INFO_FW_ASD:
fw_info->ver = adev->psp.asd_fw_version;
fw_info->feature = adev->psp.asd_feature_version;
fw_info->ver = adev->psp.asd.fw_version;
fw_info->feature = adev->psp.asd.feature_version;
break;
case AMDGPU_INFO_FW_DMCU:
fw_info->ver = adev->dm.dmcu_fw_version;
@ -41,7 +41,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->mmhub.ras_if->sub_block_index = 0;
strcpy(adev->mmhub.ras_if->name, "mmhub");
}
ih_info.head = fs_info.head = *adev->mmhub.ras_if;
r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
@ -39,7 +39,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->nbio.ras_if->sub_block_index = 0;
strcpy(adev->nbio.ras_if->name, "pcie_bif");
}
ih_info.head = fs_info.head = *adev->nbio.ras_if;
r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
@ -920,11 +920,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
}

/* This assumes only APU display buffers are pinned with (VRAM|GTT).
* See function amdgpu_display_supported_domains()
*/
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

if (bo->tbo.pin_count) {
uint32_t mem_type = bo->tbo.resource->mem_type;
uint32_t mem_flags = bo->tbo.resource->placement;
@ -949,6 +944,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}

/* This assumes only APU display buffers are pinned with (VRAM|GTT).
* See function amdgpu_display_supported_domains()
*/
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

if (bo->tbo.base.import_attach)
dma_buf_pin(bo->tbo.base.import_attach);

@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
@ -799,15 +800,15 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes)
return 0;

cmd = acquire_psp_cmd_buf(psp);

psp_copy_fw(psp, psp->asd_start_addr, psp->asd_ucode_size);
psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes);

psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
psp->asd_ucode_size);
psp->asd.size_bytes);

ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@ -908,9 +909,9 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp)
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&psp->xgmi_context.xgmi_shared_bo,
&psp->xgmi_context.xgmi_shared_mc_addr,
&psp->xgmi_context.xgmi_shared_buf);
&psp->xgmi_context.context.mem_context.shared_bo,
&psp->xgmi_context.context.mem_context.shared_mc_addr,
&psp->xgmi_context.context.mem_context.shared_buf);

return ret;
}
@ -952,20 +953,20 @@ static int psp_xgmi_load(struct psp_context *psp)

cmd = acquire_psp_cmd_buf(psp);

psp_copy_fw(psp, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes);

psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->ta_xgmi_ucode_size,
psp->xgmi_context.xgmi_shared_mc_addr,
psp->xgmi.size_bytes,
psp->xgmi_context.context.mem_context.shared_mc_addr,
PSP_XGMI_SHARED_MEM_SIZE);

ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);

if (!ret) {
psp->xgmi_context.initialized = 1;
psp->xgmi_context.session_id = cmd->resp.session_id;
psp->xgmi_context.context.initialized = true;
psp->xgmi_context.context.session_id = cmd->resp.session_id;
}

release_psp_cmd_buf(psp);
@ -990,7 +991,7 @@ static int psp_xgmi_unload(struct psp_context *psp)

cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id);

ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@ -1002,41 +1003,44 @@ static int psp_xgmi_unload(struct psp_context *psp)

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.context.session_id);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;

if (!psp->xgmi_context.initialized)
if (!psp->xgmi_context.context.initialized)
return 0;

ret = psp_xgmi_unload(psp);
if (ret)
return ret;

psp->xgmi_context.initialized = 0;
psp->xgmi_context.context.initialized = false;

/* free xgmi shared memory */
amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
&psp->xgmi_context.xgmi_shared_mc_addr,
&psp->xgmi_context.xgmi_shared_buf);
amdgpu_bo_free_kernel(&psp->xgmi_context.context.mem_context.shared_bo,
&psp->xgmi_context.context.mem_context.shared_mc_addr,
&psp->xgmi_context.context.mem_context.shared_buf);

return 0;
}

int psp_xgmi_initialize(struct psp_context *psp)
int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;

if (!psp->adev->psp.ta_fw ||
!psp->adev->psp.ta_xgmi_ucode_size ||
!psp->adev->psp.ta_xgmi_start_addr)
if (!psp->ta_fw ||
!psp->xgmi.size_bytes ||
!psp->xgmi.start_addr)
return -ENOENT;

if (!load_ta)
goto invoke;

if (!psp->xgmi_context.initialized) {
if (!psp->xgmi_context.context.initialized) {
ret = psp_xgmi_init_shared_buf(psp);
if (ret)
return ret;
@ -1047,9 +1051,11 @@ int psp_xgmi_initialize(struct psp_context *psp)
if (ret)
return ret;

invoke:
/* Initialize XGMI session */
xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->flag_extend_link_record = set_extended_data;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
@ -1062,7 +1068,7 @@ int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;

xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
@ -1082,7 +1088,7 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;

xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
@ -1100,12 +1106,59 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
return psp->adev->asic_type == CHIP_ALDEBARAN &&
psp->ta_xgmi_ucode_version >= 0x2000000b;
psp->xgmi.feature_version >= 0x2000000b;
}

/*
* Chips that support extended topology information require the driver to
* reflect topology information in the opposite direction. This is
* because the TA has already exceeded its link record limit and if the
* TA holds bi-directional information, the driver would have to do
* multiple fetches instead of just two.
*/
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
struct psp_xgmi_node_info node_info)
{
struct amdgpu_device *mirror_adev;
struct amdgpu_hive_info *hive;
uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
uint64_t dst_node_id = node_info.node_id;
uint8_t dst_num_hops = node_info.num_hops;
uint8_t dst_num_links = node_info.num_links;

hive = amdgpu_get_xgmi_hive(psp->adev);
list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
struct psp_xgmi_topology_info *mirror_top_info;
int j;

if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
continue;

mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
for (j = 0; j < mirror_top_info->num_nodes; j++) {
if (mirror_top_info->nodes[j].node_id != src_node_id)
continue;

mirror_top_info->nodes[j].num_hops = dst_num_hops;
/*
* prevent 0 num_links value re-reflection since reflection
* criteria is based on num_hops (direct or indirect).
*/
if (dst_num_links)
mirror_top_info->nodes[j].num_links = dst_num_links;

break;
}

break;
}
}

int psp_xgmi_get_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology)
struct psp_xgmi_topology_info *topology,
bool get_extended_data)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
@ -1116,8 +1169,9 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;

xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->flag_extend_link_record = get_extended_data;

/* Fill in the shared memory with topology information as input */
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@ -1140,10 +1194,19 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
for (i = 0; i < topology->num_nodes; i++) {
topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
/* extended data will either be 0 or equal to non-extended data */
if (topology_info_output->nodes[i].num_hops)
topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

/* non-extended data gets everything here so no need to update */
if (!get_extended_data) {
topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
topology->nodes[i].is_sharing_enabled =
topology_info_output->nodes[i].is_sharing_enabled;
topology->nodes[i].sdma_engine =
topology_info_output->nodes[i].sdma_engine;
}

}

/* Invoke xgmi ta again to get the link information */
@ -1158,9 +1221,18 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
return ret;

link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
for (i = 0; i < topology->num_nodes; i++)
topology->nodes[i].num_links =
for (i = 0; i < topology->num_nodes; i++) {
/* accumulate num_links on extended data */
topology->nodes[i].num_links = get_extended_data ?
topology->nodes[i].num_links +
link_info_output->nodes[i].num_links :
link_info_output->nodes[i].num_links;

/* reflect the topology information for bi-directionality */
if (psp->xgmi_context.supports_extended_data &&
get_extended_data && topology->nodes[i].num_hops)
psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
}
}

return 0;
@ -1177,7 +1249,7 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;

xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@ -1206,9 +1278,9 @@ static int psp_ras_init_shared_buf(struct psp_context *psp)
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&psp->ras.ras_shared_bo,
&psp->ras.ras_shared_mc_addr,
&psp->ras.ras_shared_buf);
&psp->ras_context.context.mem_context.shared_bo,
&psp->ras_context.context.mem_context.shared_mc_addr,
&psp->ras_context.context.mem_context.shared_buf);

return ret;
}
@ -1225,9 +1297,9 @@ static int psp_ras_load(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

psp_copy_fw(psp, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes);

ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

if (psp->adev->gmc.xgmi.connected_to_cpu)
ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
@ -1238,18 +1310,18 @@ static int psp_ras_load(struct psp_context *psp)

psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->ta_ras_ucode_size,
psp->ras.ras_shared_mc_addr,
psp->ras.size_bytes,
psp->ras_context.context.mem_context.shared_mc_addr,
PSP_RAS_SHARED_MEM_SIZE);

ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);

if (!ret) {
psp->ras.session_id = cmd->resp.session_id;
psp->ras_context.context.session_id = cmd->resp.session_id;

if (!ras_cmd->ras_status)
psp->ras.ras_initialized = true;
psp->ras_context.context.initialized = true;
else
dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
}
@ -1275,7 +1347,7 @@ static int psp_ras_unload(struct psp_context *psp)

cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id);

ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@ -1290,7 +1362,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
struct ta_ras_shared_memory *ras_cmd;
int ret;

ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

/*
* TODO: bypass the loading in sriov for now
@ -1298,7 +1370,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;

ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras_context.context.session_id);

if (amdgpu_ras_intr_triggered())
return ret;
@ -1354,10 +1426,10 @@ int psp_ras_enable_features(struct psp_context *psp,
struct ta_ras_shared_memory *ras_cmd;
int ret;

if (!psp->ras.ras_initialized)
if (!psp->ras_context.context.initialized)
return -EINVAL;

ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

if (enable)
@ -1384,19 +1456,19 @@ static int psp_ras_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

if (!psp->ras.ras_initialized)
if (!psp->ras_context.context.initialized)
return 0;

ret = psp_ras_unload(psp);
if (ret)
return ret;

psp->ras.ras_initialized = false;
psp->ras_context.context.initialized = false;

/* free ras shared memory */
amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
&psp->ras.ras_shared_mc_addr,
&psp->ras.ras_shared_buf);
amdgpu_bo_free_kernel(&psp->ras_context.context.mem_context.shared_bo,
&psp->ras_context.context.mem_context.shared_mc_addr,
&psp->ras_context.context.mem_context.shared_buf);

return 0;
}
@ -1413,8 +1485,8 @@ static int psp_ras_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(adev))
return 0;

if (!adev->psp.ta_ras_ucode_size ||
!adev->psp.ta_ras_start_addr) {
if (!adev->psp.ras.size_bytes ||
!adev->psp.ras.start_addr) {
dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
@ -1460,7 +1532,7 @@ static int psp_ras_initialize(struct psp_context *psp)
}
}

if (!psp->ras.ras_initialized) {
if (!psp->ras_context.context.initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
return ret;
@ -1479,10 +1551,10 @@ int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_shared_memory *ras_cmd;
int ret;

if (!psp->ras.ras_initialized)
if (!psp->ras_context.context.initialized)
return -EINVAL;

ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
@ -1512,9 +1584,9 @@ static int psp_hdcp_init_shared_buf(struct psp_context *psp)
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
&psp->hdcp_context.hdcp_shared_buf);
&psp->hdcp_context.context.mem_context.shared_bo,
&psp->hdcp_context.context.mem_context.shared_mc_addr,
&psp->hdcp_context.context.mem_context.shared_buf);

return ret;
}
@ -1530,22 +1602,22 @@ static int psp_hdcp_load(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

psp_copy_fw(psp, psp->ta_hdcp_start_addr,
psp->ta_hdcp_ucode_size);
psp_copy_fw(psp, psp->hdcp.start_addr,
psp->hdcp.size_bytes);

cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->ta_hdcp_ucode_size,
psp->hdcp_context.hdcp_shared_mc_addr,
psp->hdcp.size_bytes,
psp->hdcp_context.context.mem_context.shared_mc_addr,
PSP_HDCP_SHARED_MEM_SIZE);

ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

if (!ret) {
psp->hdcp_context.hdcp_initialized = true;
psp->hdcp_context.session_id = cmd->resp.session_id;
psp->hdcp_context.context.initialized = true;
psp->hdcp_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->hdcp_context.mutex);
}

@ -1563,13 +1635,13 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

if (!psp->adev->psp.ta_hdcp_ucode_size ||
!psp->adev->psp.ta_hdcp_start_addr) {
if (!psp->hdcp.size_bytes ||
!psp->hdcp.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}

if (!psp->hdcp_context.hdcp_initialized) {
if (!psp->hdcp_context.context.initialized) {
ret = psp_hdcp_init_shared_buf(psp);
if (ret)
return ret;
@ -1595,7 +1667,7 @@ static int psp_hdcp_unload(struct psp_context *psp)

cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id);

ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

@ -1612,7 +1684,7 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;

return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.context.session_id);
}

static int psp_hdcp_terminate(struct psp_context *psp)
@ -1625,8 +1697,8 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

if (!psp->hdcp_context.hdcp_initialized) {
if (psp->hdcp_context.hdcp_shared_buf)
if (!psp->hdcp_context.context.initialized) {
if (psp->hdcp_context.context.mem_context.shared_buf)
goto out;
else
return 0;
@ -1636,13 +1708,13 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (ret)
return ret;

psp->hdcp_context.hdcp_initialized = false;
psp->hdcp_context.context.initialized = false;

out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
&psp->hdcp_context.hdcp_shared_buf);
amdgpu_bo_free_kernel(&psp->hdcp_context.context.mem_context.shared_bo,
&psp->hdcp_context.context.mem_context.shared_mc_addr,
&psp->hdcp_context.context.mem_context.shared_buf);

return 0;
}
@ -1659,9 +1731,9 @@ static int psp_dtm_init_shared_buf(struct psp_context *psp)
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
&psp->dtm_context.dtm_shared_buf);
&psp->dtm_context.context.mem_context.shared_bo,
&psp->dtm_context.context.mem_context.shared_mc_addr,
&psp->dtm_context.context.mem_context.shared_buf);

return ret;
}
@ -1677,21 +1749,21 @@ static int psp_dtm_load(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

psp_copy_fw(psp, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes);

cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->ta_dtm_ucode_size,
psp->dtm_context.dtm_shared_mc_addr,
psp->dtm.size_bytes,
psp->dtm_context.context.mem_context.shared_mc_addr,
PSP_DTM_SHARED_MEM_SIZE);

ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

if (!ret) {
psp->dtm_context.dtm_initialized = true;
psp->dtm_context.session_id = cmd->resp.session_id;
psp->dtm_context.context.initialized = true;
psp->dtm_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->dtm_context.mutex);
}

@ -1710,13 +1782,13 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

if (!psp->adev->psp.ta_dtm_ucode_size ||
!psp->adev->psp.ta_dtm_start_addr) {
if (!psp->dtm.size_bytes ||
!psp->dtm.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}

if (!psp->dtm_context.dtm_initialized) {
if (!psp->dtm_context.context.initialized) {
ret = psp_dtm_init_shared_buf(psp);
if (ret)
return ret;
@ -1742,7 +1814,7 @@ static int psp_dtm_unload(struct psp_context *psp)

cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id);

ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

@ -1759,7 +1831,7 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;

return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.context.session_id);
}

static int psp_dtm_terminate(struct psp_context *psp)
@ -1772,8 +1844,8 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

if (!psp->dtm_context.dtm_initialized) {
if (psp->dtm_context.dtm_shared_buf)
if (!psp->dtm_context.context.initialized) {
if (psp->dtm_context.context.mem_context.shared_buf)
goto out;
else
return 0;
@ -1783,13 +1855,13 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (ret)
return ret;

psp->dtm_context.dtm_initialized = false;
psp->dtm_context.context.initialized = false;

out:
/* free dtm shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
&psp->dtm_context.dtm_shared_buf);
amdgpu_bo_free_kernel(&psp->dtm_context.context.mem_context.shared_bo,
&psp->dtm_context.context.mem_context.shared_mc_addr,
&psp->dtm_context.context.mem_context.shared_buf);

return 0;
}
@ -1806,9 +1878,9 @@ static int psp_rap_init_shared_buf(struct psp_context *psp)
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&psp->rap_context.rap_shared_bo,
&psp->rap_context.rap_shared_mc_addr,
&psp->rap_context.rap_shared_buf);
&psp->rap_context.context.mem_context.shared_bo,
&psp->rap_context.context.mem_context.shared_mc_addr,
&psp->rap_context.context.mem_context.shared_buf);

return ret;
}

@ -1818,21 +1890,21 @@ static int psp_rap_load(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd;

psp_copy_fw(psp, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes);

cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->ta_rap_ucode_size,
psp->rap_context.rap_shared_mc_addr,
psp->rap.size_bytes,
psp->rap_context.context.mem_context.shared_mc_addr,
PSP_RAP_SHARED_MEM_SIZE);

ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

if (!ret) {
psp->rap_context.rap_initialized = true;
psp->rap_context.session_id = cmd->resp.session_id;
psp->rap_context.context.initialized = true;
psp->rap_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->rap_context.mutex);
}

@ -1846,7 +1918,7 @@ static int psp_rap_unload(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);
psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id);

ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

@ -1866,13 +1938,13 @@ static int psp_rap_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

if (!psp->adev->psp.ta_rap_ucode_size ||
!psp->adev->psp.ta_rap_start_addr) {
if (!psp->rap.size_bytes ||
!psp->rap.start_addr) {
dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
return 0;
}

if (!psp->rap_context.rap_initialized) {
if (!psp->rap_context.context.initialized) {
ret = psp_rap_init_shared_buf(psp);
if (ret)
return ret;

@ -1886,11 +1958,11 @@ static int psp_rap_initialize(struct psp_context *psp)
if (ret || status != TA_RAP_STATUS__SUCCESS) {
psp_rap_unload(psp);

amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
&psp->rap_context.rap_shared_mc_addr,
&psp->rap_context.rap_shared_buf);
amdgpu_bo_free_kernel(&psp->rap_context.context.mem_context.shared_bo,
&psp->rap_context.context.mem_context.shared_mc_addr,
&psp->rap_context.context.mem_context.shared_buf);

psp->rap_context.rap_initialized = false;
psp->rap_context.context.initialized = false;

dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
ret, status);

@ -1905,17 +1977,17 @@ static int psp_rap_terminate(struct psp_context *psp)
{
int ret;

if (!psp->rap_context.rap_initialized)
if (!psp->rap_context.context.initialized)
return 0;

ret = psp_rap_unload(psp);

psp->rap_context.rap_initialized = false;
psp->rap_context.context.initialized = false;

/* free rap shared memory */
amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
&psp->rap_context.rap_shared_mc_addr,
&psp->rap_context.rap_shared_buf);
amdgpu_bo_free_kernel(&psp->rap_context.context.mem_context.shared_bo,
&psp->rap_context.context.mem_context.shared_mc_addr,
&psp->rap_context.context.mem_context.shared_buf);

return ret;
}

@ -1925,7 +1997,7 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
struct ta_rap_shared_memory *rap_cmd;
int ret = 0;

if (!psp->rap_context.rap_initialized)
if (!psp->rap_context.context.initialized)
return 0;

if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&

@ -1935,13 +2007,13 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
mutex_lock(&psp->rap_context.mutex);

rap_cmd = (struct ta_rap_shared_memory *)
psp->rap_context.rap_shared_buf;
psp->rap_context.context.mem_context.shared_buf;
memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));

rap_cmd->cmd_id = ta_cmd_id;
rap_cmd->validation_method_id = METHOD_A;

ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.context.session_id);
if (ret)
goto out_unlock;
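With RAP now routed through the generic ta_context, the shared-buffer allocation above is identical for every trust application. A minimal sketch of the helper this refactor makes possible, assuming the ta_mem_context layout shown later in this diff (the helper name is illustrative, not part of this patch):

/* Sketch only: one shared-buffer allocator for any TA's mem context. */
static int psp_ta_init_shared_buf(struct psp_context *psp,
				  struct ta_mem_context *mem_ctx,
				  uint32_t shared_mem_size)
{
	/* Page-aligned VRAM buffer shared between the driver and the PSP. */
	return amdgpu_bo_create_kernel(psp->adev, shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}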
@ -1966,9 +2038,9 @@ static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&psp->securedisplay_context.securedisplay_shared_bo,
&psp->securedisplay_context.securedisplay_shared_mc_addr,
&psp->securedisplay_context.securedisplay_shared_buf);
&psp->securedisplay_context.context.mem_context.shared_bo,
&psp->securedisplay_context.context.mem_context.shared_mc_addr,
&psp->securedisplay_context.context.mem_context.shared_buf);

return ret;
}

@ -1979,19 +2051,19 @@ static int psp_securedisplay_load(struct psp_context *psp)
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
memcpy(psp->fw_pri_buf, psp->securedisplay.start_addr, psp->securedisplay.size_bytes);

psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
psp->ta_securedisplay_ucode_size,
psp->securedisplay_context.securedisplay_shared_mc_addr,
psp->securedisplay.size_bytes,
psp->securedisplay_context.context.mem_context.shared_mc_addr,
PSP_SECUREDISPLAY_SHARED_MEM_SIZE);

ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

if (!ret) {
psp->securedisplay_context.securedisplay_initialized = true;
psp->securedisplay_context.session_id = cmd->resp.session_id;
psp->securedisplay_context.context.initialized = true;
psp->securedisplay_context.context.session_id = cmd->resp.session_id;
mutex_init(&psp->securedisplay_context.mutex);
}

@ -2005,7 +2077,7 @@ static int psp_securedisplay_unload(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id);

ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

@ -2025,13 +2097,13 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

if (!psp->adev->psp.ta_securedisplay_ucode_size ||
!psp->adev->psp.ta_securedisplay_start_addr) {
if (!psp->securedisplay.size_bytes ||
!psp->securedisplay.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
return 0;
}

if (!psp->securedisplay_context.securedisplay_initialized) {
if (!psp->securedisplay_context.context.initialized) {
ret = psp_securedisplay_init_shared_buf(psp);
if (ret)
return ret;

@ -2048,11 +2120,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (ret) {
psp_securedisplay_unload(psp);

amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
&psp->securedisplay_context.securedisplay_shared_mc_addr,
&psp->securedisplay_context.securedisplay_shared_buf);
amdgpu_bo_free_kernel(&psp->securedisplay_context.context.mem_context.shared_bo,
&psp->securedisplay_context.context.mem_context.shared_mc_addr,
&psp->securedisplay_context.context.mem_context.shared_buf);

psp->securedisplay_context.securedisplay_initialized = false;
psp->securedisplay_context.context.initialized = false;

dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
return -EINVAL;

@ -2077,19 +2149,19 @@ static int psp_securedisplay_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;

if (!psp->securedisplay_context.securedisplay_initialized)
if (!psp->securedisplay_context.context.initialized)
return 0;

ret = psp_securedisplay_unload(psp);
if (ret)
return ret;

psp->securedisplay_context.securedisplay_initialized = false;
psp->securedisplay_context.context.initialized = false;

/* free securedisplay shared memory */
amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
&psp->securedisplay_context.securedisplay_shared_mc_addr,
&psp->securedisplay_context.securedisplay_shared_buf);
amdgpu_bo_free_kernel(&psp->securedisplay_context.context.mem_context.shared_bo,
&psp->securedisplay_context.context.mem_context.shared_mc_addr,
&psp->securedisplay_context.context.mem_context.shared_buf);

return ret;
}

@ -2098,7 +2170,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
int ret;

if (!psp->securedisplay_context.securedisplay_initialized)
if (!psp->securedisplay_context.context.initialized)
return -EINVAL;

if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&

@ -2107,7 +2179,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)

mutex_lock(&psp->securedisplay_context.mutex);

ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.context.session_id);

mutex_unlock(&psp->securedisplay_context.mutex);
@ -2420,7 +2492,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
struct amdgpu_firmware_info *ucode =
&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
struct amdgpu_ras *ras = psp->ras.ras;
struct amdgpu_ras *ras = psp->ras_context.ras;

if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
return 0;

@ -2625,7 +2697,7 @@ skip_memalloc:
return ret;
}

if (psp->adev->psp.ta_fw) {
if (psp->ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,

@ -2697,7 +2769,7 @@ static int psp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;

if (psp->adev->psp.ta_fw) {
if (psp->ta_fw) {
psp_ras_terminate(psp);
psp_securedisplay_terminate(psp);
psp_rap_terminate(psp);

@ -2727,7 +2799,7 @@ static int psp_suspend(void *handle)
struct psp_context *psp = &adev->psp;

if (adev->gmc.xgmi.num_physical_nodes > 1 &&
psp->xgmi_context.initialized == 1) {
psp->xgmi_context.context.initialized) {
ret = psp_xgmi_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate xgmi ta\n");

@ -2735,7 +2807,7 @@ static int psp_suspend(void *handle)
}
}

if (psp->adev->psp.ta_fw) {
if (psp->ta_fw) {
ret = psp_ras_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate ras ta\n");
@ -2817,7 +2889,7 @@ static int psp_resume(void *handle)
}

if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp);
ret = psp_xgmi_initialize(psp, false, true);
/* Warn on XGMI session initialize failure
* instead of stopping driver initialization
*/

@ -2826,7 +2898,7 @@ static int psp_resume(void *handle)
"XGMI: Failed to initialize XGMI session\n");
}

if (psp->adev->psp.ta_fw) {
if (psp->ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
@ -2978,10 +3050,10 @@ int psp_init_asd_microcode(struct psp_context *psp,
goto out;

asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
adev->psp.asd.start_addr = (uint8_t *)asd_hdr +
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
out:

@ -3123,6 +3195,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr->sos.offset_bytes);
adev->psp.xgmi_context.supports_extended_data = false;
} else {
/* Load alternate PSP SOS FW */
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;

@ -3137,6 +3210,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
adev->psp.xgmi_context.supports_extended_data = true;
}

if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
@ -3266,40 +3340,40 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,

switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
psp->asd.fw_version = le32_to_cpu(desc->fw_version);
psp->asd.feature_version = le32_to_cpu(desc->fw_version);
psp->asd.size_bytes = le32_to_cpu(desc->size_bytes);
psp->asd.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
psp->ta_xgmi_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_xgmi_start_addr = ucode_start_addr;
psp->xgmi.feature_version = le32_to_cpu(desc->fw_version);
psp->xgmi.size_bytes = le32_to_cpu(desc->size_bytes);
psp->xgmi.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAS:
psp->ta_ras_ucode_version = le32_to_cpu(desc->fw_version);
psp->ta_ras_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_ras_start_addr = ucode_start_addr;
psp->ras.feature_version = le32_to_cpu(desc->fw_version);
psp->ras.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ras.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_HDCP:
psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
psp->ta_hdcp_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_hdcp_start_addr = ucode_start_addr;
psp->hdcp.feature_version = le32_to_cpu(desc->fw_version);
psp->hdcp.size_bytes = le32_to_cpu(desc->size_bytes);
psp->hdcp.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_DTM:
psp->ta_dtm_ucode_version = le32_to_cpu(desc->fw_version);
psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_dtm_start_addr = ucode_start_addr;
psp->dtm.feature_version = le32_to_cpu(desc->fw_version);
psp->dtm.size_bytes = le32_to_cpu(desc->size_bytes);
psp->dtm.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAP:
psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version);
psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_rap_start_addr = ucode_start_addr;
psp->rap.feature_version = le32_to_cpu(desc->fw_version);
psp->rap.size_bytes = le32_to_cpu(desc->size_bytes);
psp->rap.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
psp->ta_securedisplay_ucode_version = le32_to_cpu(desc->fw_version);
psp->ta_securedisplay_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_securedisplay_start_addr = ucode_start_addr;
psp->securedisplay.feature_version = le32_to_cpu(desc->fw_version);
psp->securedisplay.size_bytes = le32_to_cpu(desc->size_bytes);
psp->securedisplay.start_addr = ucode_start_addr;
break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
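Every case above now fills the same three fields of a psp_bin_desc, so the switch could shrink further with a helper along these lines (a sketch with an illustrative name; note the ASD case additionally sets fw_version):

/* Sketch only: fill one TA binary descriptor from pre-extracted values. */
static void psp_fill_bin_desc(struct psp_bin_desc *bin, uint32_t fw_version,
			      uint32_t size_bytes, uint8_t *start_addr)
{
	bin->feature_version = fw_version;
	bin->size_bytes = size_bytes;
	bin->start_addr = start_addr;
}

Usage would then be, for example, psp_fill_bin_desc(&psp->ras, le32_to_cpu(desc->fw_version), le32_to_cpu(desc->size_bytes), ucode_start_addr);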
@ -136,59 +136,32 @@ struct psp_asd_context {
uint32_t session_id;
};

struct ta_mem_context {
struct amdgpu_bo *shared_bo;
uint64_t shared_mc_addr;
void *shared_buf;
};

struct ta_context {
bool initialized;
uint32_t session_id;
struct ta_mem_context mem_context;
};

struct ta_cp_context {
struct ta_context context;
struct mutex mutex;
};

struct psp_xgmi_context {
uint8_t initialized;
uint32_t session_id;
struct amdgpu_bo *xgmi_shared_bo;
uint64_t xgmi_shared_mc_addr;
void *xgmi_shared_buf;
struct ta_context context;
struct psp_xgmi_topology_info top_info;
bool supports_extended_data;
};

struct psp_ras_context {
/*ras fw*/
bool ras_initialized;
uint32_t session_id;
struct amdgpu_bo *ras_shared_bo;
uint64_t ras_shared_mc_addr;
void *ras_shared_buf;
struct amdgpu_ras *ras;
};

struct psp_hdcp_context {
bool hdcp_initialized;
uint32_t session_id;
struct amdgpu_bo *hdcp_shared_bo;
uint64_t hdcp_shared_mc_addr;
void *hdcp_shared_buf;
struct mutex mutex;
};

struct psp_dtm_context {
bool dtm_initialized;
uint32_t session_id;
struct amdgpu_bo *dtm_shared_bo;
uint64_t dtm_shared_mc_addr;
void *dtm_shared_buf;
struct mutex mutex;
};

struct psp_rap_context {
bool rap_initialized;
uint32_t session_id;
struct amdgpu_bo *rap_shared_bo;
uint64_t rap_shared_mc_addr;
void *rap_shared_buf;
struct mutex mutex;
};

struct psp_securedisplay_context {
bool securedisplay_initialized;
uint32_t session_id;
struct amdgpu_bo *securedisplay_shared_bo;
uint64_t securedisplay_shared_mc_addr;
void *securedisplay_shared_buf;
struct mutex mutex;
struct ta_context context;
struct amdgpu_ras *ras;
};

#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
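The new layering gives every content-protection TA the same shape: a ta_context (initialized flag, session id, shared memory) plus a serializing mutex in ta_cp_context. A hedged sketch of the generic invoke path this enables (illustrative function name, not from this patch; psp_ta_invoke is the real helper used throughout this file):

/* Sketch only: serialize one TA command through a ta_cp_context. */
static int psp_ta_cp_invoke(struct psp_context *psp,
			    struct ta_cp_context *cp_ctx,
			    uint32_t ta_cmd_id)
{
	int ret;

	if (!cp_ctx->context.initialized)
		return -EINVAL;

	mutex_lock(&cp_ctx->mutex);
	ret = psp_ta_invoke(psp, ta_cmd_id, cp_ctx->context.session_id);
	mutex_unlock(&cp_ctx->mutex);

	return ret;
}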
@ -327,11 +300,8 @@ struct psp_context
uint64_t tmr_mc_addr;

/* asd firmware */
const struct firmware *asd_fw;
uint32_t asd_fw_version;
uint32_t asd_feature_version;
uint32_t asd_ucode_size;
uint8_t *asd_start_addr;
const struct firmware *asd_fw;
struct psp_bin_desc asd;

/* toc firmware */
const struct firmware *toc_fw;

@ -356,36 +326,20 @@ struct psp_context
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
uint32_t ta_fw_version;
uint32_t ta_xgmi_ucode_version;
uint32_t ta_xgmi_ucode_size;
uint8_t *ta_xgmi_start_addr;
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_ucode_size;
uint8_t *ta_ras_start_addr;

uint32_t ta_hdcp_ucode_version;
uint32_t ta_hdcp_ucode_size;
uint8_t *ta_hdcp_start_addr;

uint32_t ta_dtm_ucode_version;
uint32_t ta_dtm_ucode_size;
uint8_t *ta_dtm_start_addr;

uint32_t ta_rap_ucode_version;
uint32_t ta_rap_ucode_size;
uint8_t *ta_rap_start_addr;

uint32_t ta_securedisplay_ucode_version;
uint32_t ta_securedisplay_ucode_size;
uint8_t *ta_securedisplay_start_addr;
struct psp_bin_desc xgmi;
struct psp_bin_desc ras;
struct psp_bin_desc hdcp;
struct psp_bin_desc dtm;
struct psp_bin_desc rap;
struct psp_bin_desc securedisplay;

struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
struct psp_hdcp_context hdcp_context;
struct psp_dtm_context dtm_context;
struct psp_rap_context rap_context;
struct psp_securedisplay_context securedisplay_context;
struct psp_ras_context ras_context;
struct ta_cp_context hdcp_context;
struct ta_cp_context dtm_context;
struct ta_cp_context rap_context;
struct ta_cp_context securedisplay_context;
struct mutex mutex;
struct psp_memory_training_context mem_train_ctx;
@ -452,14 +406,15 @@ int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
uint64_t cmd_gpu_addr, int cmd_size);

int psp_xgmi_initialize(struct psp_context *psp);
int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta);
int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
int psp_xgmi_get_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology);
struct psp_xgmi_topology_info *topology,
bool get_extended_data);
int psp_xgmi_set_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology);
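The two new flags on psp_xgmi_initialize() separate TA loading from data-partition selection. Illustrative call shapes, matching the call sites later in this diff:

ret = psp_xgmi_initialize(&adev->psp, false, true);	/* first init: load the TA, base data */
ret = psp_xgmi_initialize(&adev->psp, true, false);	/* re-init for extended data, TA already loaded */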
@ -76,7 +76,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
dev_info(adev->dev, "RAP L0 validate test success.\n");
} else {
rap_shared_mem = (struct ta_rap_shared_memory *)
adev->psp.rap_context.rap_shared_buf;
adev->psp.rap_context.context.mem_context.shared_buf;
rap_cmd_output = &(rap_shared_mem->rap_out_message.output);

dev_info(adev->dev, "RAP test failed, the output is:\n");

@ -119,7 +119,7 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;

if (!adev->psp.rap_context.rap_initialized)
if (!adev->psp.rap_context.context.initialized)
return;

debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
@ -64,7 +64,6 @@ const char *ras_block_string[] = {
};

#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

@ -530,7 +529,7 @@ static inline void put_obj(struct ras_manager *obj)
if (obj && (--obj->use == 0))
list_del(&obj->node);
if (obj && (obj->use < 0))
DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block));
}

/* make one obj and return it. */

@ -793,7 +792,6 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
.type = default_ras_type,
.sub_block_index = 0,
};
strcpy(head.name, ras_block_str(i));
if (bypass) {
/*
* bypass psp. vbios enable ras for us.

@ -1866,7 +1864,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
struct amdgpu_ras_eeprom_control *control =
&adev->psp.ras.ras->eeprom_control;
&adev->psp.ras_context.ras->eeprom_control;
struct eeprom_table_record *bps;
int ret;
@ -53,6 +53,9 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__LAST
};

extern const char *ras_block_string[];

#define ras_block_str(i) (ras_block_string[i])
#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)

@ -306,8 +309,6 @@ struct ras_common_if {
enum amdgpu_ras_block block;
enum amdgpu_ras_error_type type;
uint32_t sub_block_index;
/* block name */
char name[32];
};

struct amdgpu_ras {

@ -470,8 +471,8 @@ struct ras_debug_if {
* 8: feature disable
*/

#define amdgpu_ras_get_context(adev) ((adev)->psp.ras.ras)
#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras.ras = (ras_con))
#define amdgpu_ras_get_context(adev) ((adev)->psp.ras_context.ras)
#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras_context.ras = (ras_con))

/* check if ras is supported on block, say, sdma, gfx */
static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
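With the name[32] field dropped from ras_common_if, the printable block name is derived from the enum via the shared ras_block_string[] table, so no per-object copy can go stale. A hedged usage sketch (illustrative function, not from this patch):

/* Sketch: every caller now prints a consistent name from the enum alone. */
static void ras_print_block(struct amdgpu_device *adev,
			    struct ras_common_if *head)
{
	dev_info(adev->dev, "RAS block: %s\n", ras_block_str(head->block));
}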
@ -48,6 +48,9 @@
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)

/* fence flag bit to indicate the fence is embedded in a job */
#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

#define AMDGPU_IB_POOL_SIZE (1024 * 1024)

@ -118,7 +121,7 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
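The extra amdgpu_job parameter lets amdgpu_fence_emit() embed the fence in the job when one exists. A hedged sketch of the standalone case (illustrative function; it matches the NULL-job call in amdgpu_vm_flush() later in this diff):

/* Sketch: with a NULL job the fence is allocated on its own rather
 * than embedded via AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT.
 */
static int example_emit_standalone_fence(struct amdgpu_ring *ring)
{
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_fence_emit(ring, &fence, NULL, 0);
	if (r)
		return r;

	dma_fence_put(fence);
	return 0;
}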
@ -105,7 +105,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->sdma.ras_if->sub_block_index = 0;
strcpy(adev->sdma.ras_if->name, "sdma");
}
fs_info.head = ih_info->head = *adev->sdma.ras_if;

@ -80,7 +80,7 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
enum ta_securedisplay_command command_id)
{
*cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
*cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
memset(*cmd, 0, sizeof(struct securedisplay_cmd));
(*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
(*cmd)->cmd_id = command_id;

@ -170,7 +170,7 @@ void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

if (!adev->psp.securedisplay_context.securedisplay_initialized)
if (!adev->psp.securedisplay_context.context.initialized)
return;

debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,

@ -525,9 +525,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version);
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version);
FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
@ -136,21 +136,11 @@ struct psp_firmware_header_v2_0 {
/* version_major=1, version_minor=0 */
struct ta_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t ta_xgmi_ucode_version;
uint32_t ta_xgmi_offset_bytes;
uint32_t ta_xgmi_size_bytes;
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_offset_bytes;
uint32_t ta_ras_size_bytes;
uint32_t ta_hdcp_ucode_version;
uint32_t ta_hdcp_offset_bytes;
uint32_t ta_hdcp_size_bytes;
uint32_t ta_dtm_ucode_version;
uint32_t ta_dtm_offset_bytes;
uint32_t ta_dtm_size_bytes;
uint32_t ta_securedisplay_ucode_version;
uint32_t ta_securedisplay_offset_bytes;
uint32_t ta_securedisplay_size_bytes;
struct psp_fw_legacy_bin_desc xgmi;
struct psp_fw_legacy_bin_desc ras;
struct psp_fw_legacy_bin_desc hdcp;
struct psp_fw_legacy_bin_desc dtm;
struct psp_fw_legacy_bin_desc securedisplay;
};

enum ta_fw_type {
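For reference, the legacy descriptor bundles the same three fields each TA previously carried inline. A sketch of the expected layout (the authoritative definition lives elsewhere in amdgpu_ucode.h and should be preferred):

struct psp_fw_legacy_bin_desc {
	uint32_t fw_version;
	uint32_t offset_bytes;
	uint32_t size_bytes;
};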
@ -41,7 +41,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->umc.ras_if->sub_block_index = 0;
strcpy(adev->umc.ras_if->name, "umc");
}
ih_info.head = fs_info.head = *adev->umc.ras_if;

@ -532,9 +532,9 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ta_ras_ucode_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.ta_xgmi_ucode_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ras.feature_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);

@ -1218,7 +1218,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

if (vm_flush_needed || pasid_mapping_needed) {
r = amdgpu_fence_emit(ring, &fence, 0);
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
}
@ -498,6 +498,32 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
return -EINVAL;
}

/*
* Devices that support extended data require the entire hive to initialize with
* the shared memory buffer flag set.
*
* Hive locks and conditions apply - see amdgpu_xgmi_add_device
*/
static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
bool set_extended_data)
{
struct amdgpu_device *tmp_adev;
int ret;

list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Failed to initialize xgmi session for data partition %i\n",
set_extended_data);
return ret;
}

}

return 0;
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;

@ -512,7 +538,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)

if (!adev->gmc.xgmi.pending_reset &&
amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
ret = psp_xgmi_initialize(&adev->psp);
ret = psp_xgmi_initialize(&adev->psp, false, true);
if (ret) {
dev_err(adev->dev,
"XGMI: Failed to initialize xgmi session\n");

@ -575,7 +601,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
/* get latest topology info for each device from psp */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
&tmp_adev->psp.xgmi_context.top_info);
&tmp_adev->psp.xgmi_context.top_info, false);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",

@ -585,6 +611,34 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit_unlock;
}
}

/* get topology again for hives that support extended data */
if (adev->psp.xgmi_context.supports_extended_data) {

/* initialize the hive to get extended data. */
ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
if (ret)
goto exit_unlock;

/* get the extended data. */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
&tmp_adev->psp.xgmi_context.top_info, true);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
goto exit_unlock;
}
}

/* initialize the hive to get non-extended data for the next round. */
ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
if (ret)
goto exit_unlock;

}
}

if (!ret && !adev->gmc.xgmi.pending_reset)

@ -663,7 +717,6 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->gmc.xgmi.ras_if->sub_block_index = 0;
strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
}
ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
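Condensed, the extended-data flow added above runs in two passes over the hive (taken directly from amdgpu_xgmi_add_device; top_info abbreviates the per-device context field):

/* Pass 1: base topology; then re-init the hive with the extended-data
 * flag set, read the extended topology, and flip back for the next round.
 */
psp_xgmi_get_topology_info(&tmp_adev->psp, count, top_info, false);
amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
psp_xgmi_get_topology_info(&tmp_adev->psp, count, top_info, true);
amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);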
@ -85,7 +85,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
};

/**
/*
* This shader is used to clear VGPRS and LDS, and also write the input
* pattern into the write back buffer, which will be used by driver to
* check whether all SIMDs have been covered.

@ -206,7 +206,7 @@ const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = {
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};

/**
/*
* The below shaders are used to clear SGPRS, and also write the input
* pattern into the write back buffer. The first two dispatch should be
* scheduled simultaneously which make sure that all SGPRS could be

@ -302,7 +302,7 @@ const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = {
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};

/**
/*
* This shader is used to clear the uninitiated sgprs after the above
* two dispatches, because of hardware feature, dispatch 0 couldn't clear
* top hole sgprs. Therefore need 4 waves per SIMD to cover these sgprs
@ -75,9 +75,8 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
max_physical_node_id = 7;
break;
case CHIP_ALDEBARAN:
/* just using duplicates for Aldebaran support, revisit later */
max_num_physical_nodes = 8;
max_physical_node_id = 7;
max_num_physical_nodes = 16;
max_physical_node_id = 15;
break;
default:
return -EINVAL;

@ -24,9 +24,7 @@
#ifndef __MMSCH_V1_0_H__
#define __MMSCH_V1_0_H__

#define MMSCH_VERSION_MAJOR 1
#define MMSCH_VERSION_MINOR 0
#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
#define MMSCH_VERSION 0x1

enum mmsch_v1_0_command_type {
MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
@ -96,7 +96,11 @@ static int xgpu_nv_poll_ack(struct amdgpu_device *adev)

static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
int r;
uint64_t timeout, now;

now = (uint64_t)ktime_to_ms(ktime_get());
timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);

@ -104,8 +108,8 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
return 0;

msleep(10);
timeout -= 10;
} while (timeout > 1);
now = (uint64_t)ktime_to_ms(ktime_get());
} while (timeout > now);

return -ETIME;
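The rework above replaces a decrement-per-sleep counter with a wall-clock deadline, so time spent inside xgpu_nv_mailbox_rcv_msg() also counts against the timeout. The same pattern as a hedged standalone sketch (illustrative helper, not from this patch):

/* Sketch of the deadline loop: compare ktime-derived milliseconds so the
 * poll interval need not be exact.
 */
static int poll_with_deadline(struct amdgpu_device *adev,
			      enum idh_event event, uint64_t timeout_ms)
{
	uint64_t deadline = (uint64_t)ktime_to_ms(ktime_get()) + timeout_ms;

	do {
		if (!xgpu_nv_mailbox_rcv_msg(adev, event))
			return 0;
		msleep(10);
	} while ((uint64_t)ktime_to_ms(ktime_get()) < deadline);

	return -ETIME;
}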
@ -149,9 +153,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
int r;
int r, retry = 1;
enum idh_event event = -1;

send_request:
xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

switch (req) {

@ -170,6 +175,9 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
if (event != -1) {
r = xgpu_nv_poll_msg(adev, event);
if (r) {
if (retry++ < 2)
goto send_request;

if (req != IDH_REQ_GPU_INIT_DATA) {
pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return r;

@ -279,6 +287,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);

xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

do {
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;

@ -37,7 +37,8 @@ enum idh_request {
IDH_REQ_GPU_RESET_ACCESS,
IDH_REQ_GPU_INIT_DATA,

IDH_LOG_VF_ERROR = 200,
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
};

enum idh_event {
@ -372,13 +372,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
"errors detected in %s block, "
"no user action is needed.\n",
obj->err_data.ce_count,
adev->nbio.ras_if->name);
ras_block_str(adev->nbio.ras_if->block));

if (err_data.ue_count)
dev_info(adev->dev, "%ld uncorrectable hardware "
"errors detected in %s block\n",
obj->err_data.ue_count,
adev->nbio.ras_if->name);
ras_block_str(adev->nbio.ras_if->block));
}

dev_info(adev->dev, "RAS controller interrupt triggered "
@ -84,29 +84,29 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)

ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
adev->psp.ta_hdcp_ucode_version =
le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
adev->psp.ta_hdcp_ucode_size =
le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
adev->psp.ta_hdcp_start_addr =
adev->psp.hdcp.feature_version =
le32_to_cpu(ta_hdr->hdcp.fw_version);
adev->psp.hdcp.size_bytes =
le32_to_cpu(ta_hdr->hdcp.size_bytes);
adev->psp.hdcp.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

adev->psp.ta_dtm_ucode_version =
le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
adev->psp.ta_dtm_ucode_size =
le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
adev->psp.ta_dtm_start_addr =
(uint8_t *)adev->psp.ta_hdcp_start_addr +
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
adev->psp.dtm.feature_version =
le32_to_cpu(ta_hdr->dtm.fw_version);
adev->psp.dtm.size_bytes =
le32_to_cpu(ta_hdr->dtm.size_bytes);
adev->psp.dtm.start_addr =
(uint8_t *)adev->psp.hdcp.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);

adev->psp.ta_securedisplay_ucode_version =
le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
adev->psp.ta_securedisplay_ucode_size =
le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
adev->psp.ta_securedisplay_start_addr =
(uint8_t *)adev->psp.ta_hdcp_start_addr +
le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
adev->psp.securedisplay.feature_version =
le32_to_cpu(ta_hdr->securedisplay.fw_version);
adev->psp.securedisplay.size_bytes =
le32_to_cpu(ta_hdr->securedisplay.size_bytes);
adev->psp.securedisplay.start_addr =
(uint8_t *)adev->psp.hdcp.start_addr +
le32_to_cpu(ta_hdr->securedisplay.offset_bytes);

adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
}
@ -151,15 +151,15 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
goto out2;

ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version);
adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes);
adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +
le32_to_cpu(ta_hdr->ras.offset_bytes);
}
break;
case CHIP_NAVI10:

@ -186,17 +186,17 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
goto out2;

ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version);
adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes);
adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes);
adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
}
break;
case CHIP_SIENNA_CICHLID:
@ -84,23 +84,23 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)

ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
adev->psp.ta_hdcp_ucode_version =
le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
adev->psp.ta_hdcp_ucode_size =
le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
adev->psp.ta_hdcp_start_addr =
adev->psp.hdcp.feature_version =
le32_to_cpu(ta_hdr->hdcp.fw_version);
adev->psp.hdcp.size_bytes =
le32_to_cpu(ta_hdr->hdcp.size_bytes);
adev->psp.hdcp.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

adev->psp.ta_dtm_ucode_version =
le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
adev->psp.ta_dtm_ucode_size =
le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
adev->psp.ta_dtm_start_addr =
(uint8_t *)adev->psp.ta_hdcp_start_addr +
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
adev->psp.dtm.feature_version =
le32_to_cpu(ta_hdr->dtm.fw_version);
adev->psp.dtm.size_bytes =
le32_to_cpu(ta_hdr->dtm.size_bytes);
adev->psp.dtm.start_addr =
(uint8_t *)adev->psp.hdcp.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
}

return 0;
@ -134,7 +134,8 @@ struct ta_xgmi_shared_memory {
uint32_t cmd_id;
uint32_t resp_id;
enum ta_xgmi_status xgmi_status;
uint32_t reserved;
uint8_t flag_extend_link_record;
uint8_t reserved0[3];
union ta_xgmi_cmd_input xgmi_in_message;
union ta_xgmi_cmd_output xgmi_out_message;
};

@ -904,7 +904,14 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
return amdgpu_dpm_is_baco_supported(adev);
/* Disable BACO support for the specific polaris12 SKU temporarily */
if ((adev->pdev->device == 0x699F) &&
(adev->pdev->revision == 0xC7) &&
(adev->pdev->subsystem_vendor == 0x1028) &&
(adev->pdev->subsystem_device == 0x0039))
return false;
else
return amdgpu_dpm_is_baco_supported(adev);
default:
return false;
}
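The quirk above keys on the full PCI identity of a single Polaris12 SKU. If more SKUs ever need the exclusion, a table-driven variant could scale better; an illustrative sketch (note pci_device_id carries no revision field, so revision still needs a separate check):

static const struct pci_device_id baco_denylist[] = {
	/* Polaris12 0x699F, subsystem 1028:0039; revision 0xC7 checked separately */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATI, 0x699F, 0x1028, 0x0039) },
	{ }
};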
@ -211,6 +211,15 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
WARN_ON(!old);
}

static void program_trap_handler_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
if (dqm->dev->kfd2kgd->program_trap_handler_settings)
dqm->dev->kfd2kgd->program_trap_handler_settings(
dqm->dev->kgd, qpd->vmid,
qpd->tba_addr, qpd->tma_addr);
}

static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)

@ -241,6 +250,10 @@ static int allocate_vmid(struct device_queue_manager *dqm,

program_sh_mem_settings(dqm, qpd);

if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 &&
dqm->dev->cwsr_enabled)
program_trap_handler_settings(dqm, qpd);

/* qpd->page_table_base is set earlier when register_process()
* is called, i.e. when the first queue is created.
*/

@ -582,7 +595,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}

retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
(dqm->dev->cwsr_enabled?
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval) {
pr_err("destroy mqd failed\n");

@ -675,7 +690,9 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
continue;

retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
(dqm->dev->cwsr_enabled?
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval && !ret)
/* Return the first error, but keep going to
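Both destroy_mqd() call sites above pick the preemption type the same way, which could be factored into a helper along these lines (name and placement are illustrative, not from this patch):

/* Sketch: with CWSR enabled, save waves so the queue can be restored
 * later; otherwise drain them.
 */
static enum kfd_preempt_type get_preempt_type(struct device_queue_manager *dqm)
{
	return dqm->dev->cwsr_enabled ?
			KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
			KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN;
}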
@ -2675,22 +2675,26 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
return 0;
}

/* svm_range_best_prefetch_location - decide the best prefetch location
/**
* svm_range_best_prefetch_location - decide the best prefetch location
* @prange: svm range structure
*
* For xnack off:
* If range map to single GPU, the best acutal location is prefetch loc, which
* If range map to single GPU, the best prefetch location is prefetch_loc, which
* can be CPU or GPU.
*
* If range map to multiple GPUs, only if mGPU connection on xgmi same hive,
* the best actual location could be prefetch_loc GPU. If mGPU connection on
* PCIe, the best actual location is always CPU, because GPU cannot access vram
* of other GPUs, assuming PCIe small bar (large bar support is not upstream).
* If range is ACCESS or ACCESS_IN_PLACE by mGPUs, only if mGPU connection on
* XGMI same hive, the best prefetch location is prefetch_loc GPU, otherwise
* the best prefetch location is always CPU, because GPU can not have coherent
* mapping VRAM of other GPUs even with large-BAR PCIe connection.
*
* For xnack on:
* The best actual location is prefetch location. If mGPU connection on xgmi
* same hive, range map to multiple GPUs. Otherwise, the range only map to
* actual location GPU. Other GPU access vm fault will trigger migration.
* If range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
* prefetch_loc, other GPU access will generate vm fault and trigger migration.
*
* If range is ACCESS_IN_PLACE by mGPUs, only if mGPU connection on XGMI same
* hive, the best prefetch location is prefetch_loc GPU, otherwise the best
* prefetch location is always CPU.
*
* Context: Process context
*

@ -2710,11 +2714,6 @@ svm_range_best_prefetch_location(struct svm_range *prange)

p = container_of(prange->svms, struct kfd_process, svms);

/* xnack on */
if (p->xnack_enabled)
goto out;

/* xnack off */
if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
goto out;

@ -2724,8 +2723,12 @@ svm_range_best_prefetch_location(struct svm_range *prange)
best_loc = 0;
goto out;
}
bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
MAX_GPU_INSTANCE);

if (p->xnack_enabled)
bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
else
bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
MAX_GPU_INSTANCE);

for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
pdd = kfd_process_device_from_gpuidx(p, gpuidx);

@ -3027,6 +3030,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
start + size - 1, nattr);

/* Flush pending deferred work to avoid racing with deferred actions from
* previous memory map changes (e.g. munmap). Concurrent memory map changes
* can still race with get_attr because we don't hold the mmap lock. But that
* would be a race condition in the application anyway, and undefined
* behaviour is acceptable in that case.
*/
flush_work(&p->svms.deferred_list_work);

mmap_read_lock(mm);
if (!svm_range_is_valid(mm, start, size)) {
pr_debug("invalid range\n");
@ -1044,10 +1044,10 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
static void vblank_control_worker(struct work_struct *work)
{

struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;

mutex_lock(&dm->dc_lock);

@ -1061,23 +1061,25 @@ static void event_mall_stutter(struct work_struct *work)

DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
struct vblank_workqueue *vblank_work;

vblank_work = kzalloc(sizeof(*vblank_work), GFP_KERNEL);
if (ZERO_OR_NULL_PTR(vblank_work)) {
kfree(vblank_work);
return NULL;
/* Control PSR based on vblank requirements from OS */
if (vblank_work->stream && vblank_work->stream->link) {
if (vblank_work->enable) {
if (vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
!vblank_work->stream->link->psr_settings.psr_allow_active &&
vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
amdgpu_dm_psr_enable(vblank_work->stream);
}
}

INIT_WORK(&vblank_work->mall_work, event_mall_stutter);
mutex_unlock(&dm->dc_lock);

return vblank_work;
dc_stream_release(vblank_work->stream);

kfree(vblank_work);
}

#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
@ -1220,12 +1222,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)

#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->dm.dc->caps.max_links > 0) {
adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

if (!adev->dm.vblank_workqueue)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
else
DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
}
#endif

@ -1298,6 +1298,13 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->dm.vblank_control_workqueue) {
destroy_workqueue(adev->dm.vblank_control_workqueue);
adev->dm.vblank_control_workqueue = NULL;
}
#endif

for (i = 0; i < adev->dm.display_indexes_num; i++) {
drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
}

@ -1321,14 +1328,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->dm.vblank_workqueue) {
adev->dm.vblank_workqueue->dm = NULL;
kfree(adev->dm.vblank_workqueue);
adev->dm.vblank_workqueue = NULL;
}
#endif

dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

if (dc_enable_dmub_notifications(adev->dm.dc)) {

@ -6000,7 +5999,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct amdgpu_display_manager *dm = &adev->dm;
unsigned long flags;
struct vblank_control_work *work;
#endif
int rc = 0;

@ -6025,12 +6024,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return 0;

#if defined(CONFIG_DRM_AMD_DC_DCN)
spin_lock_irqsave(&dm->vblank_lock, flags);
dm->vblank_workqueue->dm = dm;
dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
dm->vblank_workqueue->enable = enable;
spin_unlock_irqrestore(&dm->vblank_lock, flags);
schedule_work(&dm->vblank_workqueue->mall_work);
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return -ENOMEM;

INIT_WORK(&work->work, vblank_control_worker);
work->dm = dm;
work->acrtc = acrtc;
work->enable = enable;

if (acrtc_state->stream) {
dc_stream_retain(acrtc_state->stream);
work->stream = acrtc_state->stream;
}

queue_work(dm->vblank_control_workqueue, &work->work);
#endif

return 0;
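dm_set_vblank() now allocates one work item per event instead of mutating a single preallocated struct, which removes the vblank_lock window and lets events queue independently; the worker frees its item when done. A minimal sketch of the same per-event pattern outside the driver (illustrative names throughout):

/* Sketch: allocate-per-event deferred work; the handler frees the item. */
struct event_work {
	struct work_struct work;
	int payload;
};

static void event_worker(struct work_struct *work)
{
	struct event_work *ew = container_of(work, struct event_work, work);

	/* ... act on ew->payload ... */
	kfree(ew);
}

static int queue_event(struct workqueue_struct *wq, int payload)
{
	struct event_work *ew = kzalloc(sizeof(*ew), GFP_ATOMIC);

	if (!ew)
		return -ENOMEM;

	INIT_WORK(&ew->work, event_worker);
	ew->payload = payload;
	queue_work(wq, &ew->work);
	return 0;
}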
@ -8635,6 +8643,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
* If PSR or idle optimizations are enabled then flush out
* any pending work before hardware programming.
*/
flush_workqueue(dm->vblank_control_workqueue);
#endif

bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
bundle->stream_update.src = acrtc_state->stream->src;

@ -8703,16 +8719,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
!acrtc_state->stream->link->psr_settings.psr_allow_active) {
struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
acrtc_state->stream->dm_stream_context;

/* Decrement skip count when PSR is enabled and we're doing fast updates. */
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;

if (aconn->psr_skip_count > 0)
aconn->psr_skip_count--;
else
amdgpu_dm_psr_enable(acrtc_state->stream);

/* Allow PSR when skip count is 0. */
acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
} else {
acrtc_attach->dm_irq_params.allow_psr_entry = false;
}

mutex_unlock(&dm->dc_lock);

@ -8961,8 +8981,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)

if (dc_state) {
/* if there is a mode set or reset, disable eDP PSR */
if (mode_set_reset_required)
if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
flush_workqueue(dm->vblank_control_workqueue);
#endif
amdgpu_dm_psr_disable_all(dm);
}

dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
@ -60,6 +60,7 @@ enum aux_return_code_type;
|
||||
|
||||
/* Forward declarations */
|
||||
struct amdgpu_device;
|
||||
struct amdgpu_crtc;
|
||||
struct drm_device;
|
||||
struct dc;
|
||||
struct amdgpu_bo;
|
||||
@ -86,16 +87,18 @@ struct dm_compressor_info {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vblank_workqueue - Works to be executed in a separate thread during vblank
|
||||
* @mall_work: work for mall stutter
|
||||
* struct vblank_control_work - Work data for vblank control
|
||||
* @work: Kernel work data for the work event
|
||||
* @dm: amdgpu display manager device
|
||||
* @otg_inst: otg instance of which vblank is being set
|
||||
* @enable: true if enable vblank
|
||||
* @acrtc: amdgpu CRTC instance for which the event has occurred
|
||||
* @stream: DC stream for which the event has occurred
|
||||
* @enable: true if enabling vblank
|
||||
*/
|
||||
struct vblank_workqueue {
|
||||
struct work_struct mall_work;
|
||||
struct vblank_control_work {
|
||||
struct work_struct work;
|
||||
struct amdgpu_display_manager *dm;
|
||||
int otg_inst;
|
||||
struct amdgpu_crtc *acrtc;
|
||||
struct dc_stream_state *stream;
|
||||
bool enable;
|
||||
};
|
||||
|
||||
@ -380,11 +383,11 @@ struct amdgpu_display_manager {
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
/**
|
||||
* @vblank_workqueue:
|
||||
* @vblank_control_workqueue:
|
||||
*
|
||||
* amdgpu workqueue during vblank
|
||||
* Deferred work for vblank control events.
|
||||
*/
|
||||
struct vblank_workqueue *vblank_workqueue;
|
||||
struct workqueue_struct *vblank_control_workqueue;
|
||||
#endif
|
||||
|
||||
struct drm_atomic_state *cached_state;
|
||||
|
@ -79,12 +79,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint
|
||||
|
||||
struct ta_hdcp_shared_memory *hdcp_cmd;
|
||||
|
||||
if (!psp->hdcp_context.hdcp_initialized) {
|
||||
if (!psp->hdcp_context.context.initialized) {
|
||||
DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
|
||||
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
|
||||
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
|
||||
|
||||
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
|
||||
@ -105,12 +105,12 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size,
|
||||
|
||||
struct ta_hdcp_shared_memory *hdcp_cmd;
|
||||
|
||||
if (!psp->hdcp_context.hdcp_initialized) {
|
||||
if (!psp->hdcp_context.context.initialized) {
|
||||
DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
|
||||
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
|
||||
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
|
||||
|
||||
memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
|
||||
@ -414,12 +414,12 @@ static bool enable_assr(void *handle, struct dc_link *link)
|
||||
struct ta_dtm_shared_memory *dtm_cmd;
|
||||
bool res = true;
|
||||
|
||||
if (!psp->dtm_context.dtm_initialized) {
|
||||
if (!psp->dtm_context.context.initialized) {
|
||||
DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
|
||||
return false;
|
||||
}
|
||||
|
||||
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
|
||||
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
|
||||
|
||||
mutex_lock(&psp->dtm_context.mutex);
|
||||
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
|
||||
|
@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
|
||||
handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
|
||||
|
||||
/*allocate a new amdgpu_dm_irq_handler_data*/
|
||||
handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
|
||||
handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
|
||||
if (!handler_data_add) {
|
||||
DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
|
||||
return;
|
||||
|
@ -33,6 +33,7 @@ struct dm_irq_params {
|
||||
struct mod_vrr_params vrr_params;
|
||||
struct dc_stream_state *stream;
|
||||
int active_planes;
|
||||
bool allow_psr_entry;
|
||||
struct mod_freesync_config freesync_config;
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
@ -213,6 +213,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
|
||||
drm_connector_update_edid_property(
|
||||
&aconnector->base,
|
||||
NULL);
|
||||
|
||||
DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
|
||||
if (!aconnector->dc_sink) {
|
||||
struct dc_sink *dc_sink;
|
||||
struct dc_sink_init_data init_params = {
|
||||
.link = aconnector->dc_link,
|
||||
.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
|
||||
|
||||
dc_sink = dc_link_add_remote_sink(
|
||||
aconnector->dc_link,
|
||||
NULL,
|
||||
0,
|
||||
&init_params);
|
||||
|
||||
if (!dc_sink) {
|
||||
DRM_ERROR("Unable to add a remote sink\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
dc_sink->priv = aconnector;
|
||||
aconnector->dc_sink = dc_sink;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1481,6 +1481,22 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool should_update_pipe_for_stream(
|
||||
struct dc_state *context,
|
||||
struct pipe_ctx *pipe_ctx,
|
||||
struct dc_stream_state *stream)
|
||||
{
|
||||
return (pipe_ctx->stream && pipe_ctx->stream == stream);
|
||||
}
|
||||
|
||||
static inline bool should_update_pipe_for_plane(
|
||||
struct dc_state *context,
|
||||
struct pipe_ctx *pipe_ctx,
|
||||
struct dc_plane_state *plane_state)
|
||||
{
|
||||
return (pipe_ctx->plane_state == plane_state);
|
||||
}
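
The two inline predicates added here centralize the pipe-selection checks that the later hunks in this file switch over to; note that should_update_pipe_for_stream() also insists on a non-NULL pipe stream rather than relying on pointer comparison alone. A standalone toy illustration of that difference, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct stream { int id; };
struct pipe { struct stream *stream; };

/* mirrors should_update_pipe_for_stream(): non-NULL and matching */
static bool should_update(const struct pipe *p, const struct stream *s)
{
    return p->stream && p->stream == s;
}

int main(void)
{
    struct pipe idle = { .stream = NULL };

    /* old open-coded match: NULL == NULL counts as a match */
    bool old_match = (idle.stream == NULL);
    /* helper: a pipe with no stream never matches */
    bool new_match = should_update(&idle, NULL);

    printf("old=%d new=%d\n", old_match, new_match);
    return 0;
}
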

void dc_enable_stereo(
struct dc *dc,
struct dc_state *context,
@ -1491,12 +1507,15 @@ void dc_enable_stereo(
struct pipe_ctx *pipe;

for (i = 0; i < MAX_PIPES; i++) {
if (context != NULL)
if (context != NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
else
} else {
context = dc->current_state;
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
for (j = 0 ; pipe && j < stream_count; j++) {
if (streams[j] && streams[j] == pipe->stream &&
}

for (j = 0; pipe && j < stream_count; j++) {
if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
dc->hwss.setup_stereo)
dc->hwss.setup_stereo(pipe, dc);
}
@ -1530,6 +1549,12 @@ void dc_z10_restore(struct dc *dc)
if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
if (dc->hwss.z10_save_init)
dc->hwss.z10_save_init(dc);
}
#endif
/*
* Applies given context to HW and copy it into current context.
@ -2623,6 +2648,7 @@ static void commit_planes_for_stream(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);

#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);
@ -2694,7 +2720,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}

if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, true);
else
/* Lock the top pipe while updating plane addrs, since freesync requires
@ -2717,7 +2743,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);

if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
@ -2733,14 +2759,14 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (!pipe_ctx->plane_state)
continue;
if (pipe_ctx->plane_state != plane_state)
if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
plane_state->triplebuffer_flips = false;
pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
!plane_state->flip_immediate && dc->debug.enable_tri_buf) {
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
plane_state->triplebuffer_flips = true;
pipe_ctx->plane_state->triplebuffer_flips = true;
}
}
if (update_type == UPDATE_TYPE_FULL) {
@ -2756,8 +2782,7 @@ static void commit_planes_for_stream(struct dc *dc,

if (!pipe_ctx->top_pipe &&
!pipe_ctx->prev_odm_pipe &&
pipe_ctx->stream &&
pipe_ctx->stream == stream) {
should_update_pipe_for_stream(context, pipe_ctx, stream)) {
struct dc_stream_status *stream_status = NULL;

if (!pipe_ctx->plane_state)
@ -2810,15 +2835,15 @@ static void commit_planes_for_stream(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

if (pipe_ctx->stream != stream)
if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
continue;

if (pipe_ctx->plane_state != plane_state)
if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;

// GSL has to be used for flip immediate
dc->hwss.set_flip_control_gsl(pipe_ctx,
plane_state->flip_immediate);
pipe_ctx->plane_state->flip_immediate);
}
}

@ -2829,25 +2854,26 @@ static void commit_planes_for_stream(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

if (pipe_ctx->stream != stream)
if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
continue;

if (pipe_ctx->plane_state != plane_state)
if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;

/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*only enable triplebuffer for fast_update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, plane_state->triplebuffer_flips);
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
if (srf_updates[i].flip_addr)
if (pipe_ctx->plane_state->update_flags.bits.addr_update)
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}

}

if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
@ -2891,7 +2917,7 @@ static void commit_planes_for_stream(struct dc *dc,
continue;

if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
!pipe_ctx->stream || pipe_ctx->stream != stream ||
!pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
!pipe_ctx->plane_state->update_flags.bits.addr_update ||
pipe_ctx->plane_state->skip_manual_trigger)
continue;

@ -246,6 +246,40 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}

static void program_cursor_attributes(
struct dc *dc,
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes)
{
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;

if (!stream)
return;

res_ctx = &dc->current_state->res_ctx;

for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

if (pipe_ctx->stream != stream)
continue;

if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
dc->hwss.cursor_lock(dc, pipe_to_program, true);
}

dc->hwss.set_cursor_attribute(pipe_ctx);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}

if (pipe_to_program)
dc->hwss.cursor_lock(dc, pipe_to_program, false);
}

#ifndef TRIM_FSFT
/*
* dc_optimize_timing_for_fsft() - dc to optimize timing
@ -270,10 +304,7 @@ bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes)
{
int i;
struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool reset_idle_optimizations = false;
#endif
@ -293,7 +324,6 @@ bool dc_stream_set_cursor_attributes(
}

dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
stream->cursor_attributes = *attributes;

#if defined(CONFIG_DRM_AMD_DC_DCN)
@ -305,25 +335,7 @@ bool dc_stream_set_cursor_attributes(
}

#endif

for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

if (pipe_ctx->stream != stream)
continue;

if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
dc->hwss.cursor_lock(dc, pipe_to_program, true);
}

dc->hwss.set_cursor_attribute(pipe_ctx);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}

if (pipe_to_program)
dc->hwss.cursor_lock(dc, pipe_to_program, false);
program_cursor_attributes(dc, stream, attributes);

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* re-enable idle optimizations if necessary */
@ -334,41 +346,19 @@ bool dc_stream_set_cursor_attributes(
return true;
}

bool dc_stream_set_cursor_position(
static void program_cursor_position(
struct dc *dc,
struct dc_stream_state *stream,
const struct dc_cursor_position *position)
{
int i;
struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool reset_idle_optimizations = false;
#endif

if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
return false;
}
if (!stream)
return;

if (NULL == position) {
dm_error("DC: cursor position is NULL!\n");
return false;
}

dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);

/* disable idle optimizations if enabling cursor */
if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) {
dc_allow_idle_optimizations(dc, false);
reset_idle_optimizations = true;
}

#endif
stream->cursor_position = *position;

for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@ -390,7 +380,41 @@ bool dc_stream_set_cursor_position(

if (pipe_to_program)
dc->hwss.cursor_lock(dc, pipe_to_program, false);
}

bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position)
{
struct dc *dc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool reset_idle_optimizations = false;
#endif

if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
return false;
}

if (NULL == position) {
dm_error("DC: cursor position is NULL!\n");
return false;
}

dc = stream->ctx->dc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_restore(dc);

/* disable idle optimizations if enabling cursor */
if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) {
dc_allow_idle_optimizations(dc, false);
reset_idle_optimizations = true;
}

#endif
stream->cursor_position = *position;

program_cursor_position(dc, stream, position);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* re-enable idle optimizations if necessary */
if (reset_idle_optimizations)

@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_save_init(dc);
#endif
}

return num_vmids;

@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;

#define DC_VER "3.2.147"
#define DC_VER "3.2.149"

#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(struct dc *dc);
void dc_z10_save_init(struct dc *dc);
#endif

bool dc_enable_dmub_notifications(struct dc *dc);

@ -42,6 +42,11 @@
#define DC_LOGGER \
engine->ctx->logger

#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
#define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
#define LOG_FLAG_Error_I2cAux LOG_ERROR
#define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX

#include "reg_helper.h"

#undef FN
@ -623,6 +628,58 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc,
#define AUX_MAX_INVALID_REPLY_RETRIES 2
#define AUX_MAX_TIMEOUT_RETRIES 3

static void dce_aux_log_payload(const char *payload_name,
unsigned char *payload, uint32_t length, uint32_t max_length_to_log)
{
if (!IS_DC_I2CAUX_LOGGING_ENABLED())
return;

if (payload && length) {
char hex_str[128] = {0};
char *hex_str_ptr = &hex_str[0];
uint32_t hex_str_remaining = sizeof(hex_str);
unsigned char *payload_ptr = payload;
unsigned char *payload_max_to_log_ptr = payload_ptr + min(max_length_to_log, length);
unsigned int count;
char *padding = "";

while (payload_ptr < payload_max_to_log_ptr) {
count = snprintf_count(hex_str_ptr, hex_str_remaining, "%s%02X", padding, *payload_ptr);
padding = " ";
hex_str_remaining -= count;
hex_str_ptr += count;
payload_ptr++;
}

count = snprintf_count(hex_str_ptr, hex_str_remaining, " ");
hex_str_remaining -= count;
hex_str_ptr += count;

payload_ptr = payload;
while (payload_ptr < payload_max_to_log_ptr) {
count = snprintf_count(hex_str_ptr, hex_str_remaining, "%c",
*payload_ptr >= ' ' ? *payload_ptr : '.');
hex_str_remaining -= count;
hex_str_ptr += count;
payload_ptr++;
}

DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_log_payload: %s: length=%u: data: %s%s",
payload_name,
length,
hex_str,
(length > max_length_to_log ? " (...)" : " "));
} else {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_log_payload: %s: length=%u: data: <empty payload>",
payload_name,
length);
}
}
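
dce_aux_log_payload() above renders the payload twice into one log line, first as space-separated hex and then as printable ASCII with non-printable bytes shown as dots. A userspace sketch of the same formatting, using standard snprintf() in place of the DC-internal snprintf_count() helper:

#include <stdio.h>
#include <stdint.h>

static void dump_payload(const char *name, const unsigned char *payload,
                         uint32_t length, uint32_t max_log)
{
    char buf[128] = {0};
    char *p = buf;
    size_t left = sizeof(buf);
    uint32_t n = length < max_log ? length : max_log;
    const char *pad = "";
    uint32_t i;
    int count;

    for (i = 0; i < n; i++) {           /* hex part */
        count = snprintf(p, left, "%s%02X", pad, payload[i]);
        pad = " ";
        left -= count;
        p += count;
    }
    count = snprintf(p, left, "  ");
    left -= count;
    p += count;
    for (i = 0; i < n; i++) {           /* printable-ASCII part */
        count = snprintf(p, left, "%c",
                         payload[i] >= ' ' ? payload[i] : '.');
        left -= count;
        p += count;
    }
    printf("%s: length=%u: data: %s%s\n", name, length,
           buf, length > max_log ? " (...)" : "");
}

int main(void)
{
    unsigned char msg[] = { 0x00, 0x10, 'A', 'U', 'X' };

    dump_payload("write", msg, sizeof(msg), 16);
    return 0;
}
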

bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
{
@ -648,7 +705,34 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
}

for (i = 0; i < AUX_MAX_RETRIES; i++) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
payload->address,
payload->length,
(unsigned int) payload->write,
(unsigned int) payload->mot);
if (payload->write)
dce_aux_log_payload(" write", payload->data, payload->length, 16);
ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
payload->address,
payload->length,
(unsigned int) payload->write,
(unsigned int) payload->mot,
ret,
(int)operation_result,
(unsigned int) *payload->reply);
if (!payload->write)
dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16);

switch (operation_result) {
case AUX_RET_SUCCESS:
@ -657,30 +741,64 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,

switch (*payload->reply) {
case AUX_TRANSACTION_REPLY_AUX_ACK:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_ACK");
if (!payload->write && payload->length != ret) {
if (++aux_ack_retries >= AUX_MAX_RETRIES)
if (++aux_ack_retries >= AUX_MAX_RETRIES) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
"dce_aux_transfer_with_retries: FAILURE: aux_ack_retries=%d >= AUX_MAX_RETRIES=%d",
aux_defer_retries,
AUX_MAX_RETRIES);
goto fail;
else
} else {
udelay(300);
}
} else
return true;
break;

case AUX_TRANSACTION_REPLY_AUX_DEFER:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER");

/* polling_timeout_period is in us */
defer_time_in_ms += aux110->polling_timeout_period / 1000;
++aux_defer_retries;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER");

retry_on_defer = true;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK)
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");

if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES
&& defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
"dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d && defer_time_in_ms=%d >= AUX_MAX_DEFER_TIMEOUT_MS=%d",
aux_defer_retries,
AUX_MIN_DEFER_RETRIES,
defer_time_in_ms,
AUX_MAX_DEFER_TIMEOUT_MS);
goto fail;
} else {
if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
(*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: payload->defer_delay=%u",
payload->defer_delay);
if (payload->defer_delay > 1) {
msleep(payload->defer_delay);
defer_time_in_ms += payload->defer_delay;
@ -693,37 +811,86 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;

case AUX_TRANSACTION_REPLY_I2C_DEFER:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_DEFER");

aux_defer_retries = 0;
if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES)
if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
"dce_aux_transfer_with_retries: FAILURE: aux_i2c_defer_retries=%d >= AUX_MAX_I2C_DEFER_RETRIES=%d",
aux_i2c_defer_retries,
AUX_MAX_I2C_DEFER_RETRIES);
goto fail;
}
break;

case AUX_TRANSACTION_REPLY_AUX_NACK:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_NACK");
goto fail;

case AUX_TRANSACTION_REPLY_HPD_DISCON:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_HPD_DISCON");
goto fail;

default:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case.");
goto fail;
}
break;

case AUX_RET_ERROR_INVALID_REPLY:
if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES)
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_ERROR_INVALID_REPLY");
if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
"dce_aux_transfer_with_retries: FAILURE: aux_invalid_reply_retries=%d >= AUX_MAX_INVALID_REPLY_RETRIES=%d",
aux_invalid_reply_retries,
AUX_MAX_INVALID_REPLY_RETRIES);
goto fail;
else
} else
udelay(400);
break;

case AUX_RET_ERROR_TIMEOUT:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: AUX_RET_ERROR_TIMEOUT");
// Check whether a DEFER had occurred before the timeout.
// If so, treat timeout as a DEFER.
if (retry_on_defer) {
if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES)
if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
"dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d",
aux_defer_retries,
AUX_MIN_DEFER_RETRIES);
goto fail;
else if (payload->defer_delay > 0)
} else if (payload->defer_delay > 0) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: payload->defer_delay=%u",
payload->defer_delay);
msleep(payload->defer_delay);
}
} else {
if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
"dce_aux_transfer_with_retries: FAILURE: aux_timeout_retries=%d >= AUX_MAX_TIMEOUT_RETRIES=%d",
aux_timeout_retries,
AUX_MAX_TIMEOUT_RETRIES);
goto fail;
else {
} else {
/*
* DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
* According to the DP spec there should be 3 retries total
@ -738,11 +905,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_RET_ERROR_ENGINE_ACQUIRE:
case AUX_RET_ERROR_UNKNOWN:
default:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: Failure: operation_result=%d",
(int)operation_result);
goto fail;
}
}

fail:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
"dce_aux_transfer_with_retries: FAILURE");
if (!payload_reply)
payload->reply = NULL;
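
Most of these hunks wrap existing retry-limit checks in braces so a trace message can be emitted at each give-up point; the control flow itself is unchanged. The behavioral subtlety worth a sketch is the timeout branch: a timeout seen after an earlier DEFER reply is charged against the defer budget rather than the much smaller timeout budget. A compact standalone sketch of that accounting, with illustrative limits standing in for the AUX_* constants:

#include <stdbool.h>
#include <stdio.h>

#define MIN_DEFER_RETRIES   7   /* stands in for AUX_MIN_DEFER_RETRIES */
#define MAX_TIMEOUT_RETRIES 3   /* stands in for AUX_MAX_TIMEOUT_RETRIES */

struct retry_state {
    int defer_retries;
    int timeout_retries;
    bool retry_on_defer;        /* set once a DEFER reply has been seen */
};

/* Returns true while another attempt is allowed after a timeout. */
static bool timeout_allows_retry(struct retry_state *st)
{
    if (st->retry_on_defer)     /* treat the timeout as a DEFER */
        return ++st->defer_retries < MIN_DEFER_RETRIES;
    return ++st->timeout_retries < MAX_TIMEOUT_RETRIES;
}

int main(void)
{
    struct retry_state plain = { 0 };
    struct retry_state deferred = { .retry_on_defer = true };
    int a = 0, b = 0;

    while (timeout_allows_retry(&plain))
        a++;
    while (timeout_allows_retry(&deferred))
        b++;
    printf("timeout-only budget: %d, defer-charged budget: %d\n", a, b);
    return 0;
}
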


@ -29,7 +29,7 @@
#include "dmub/dmub_srv.h"
#include "core_types.h"

#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
#define DC_TRACE_LEVEL_MESSAGE(...) do {} while (0) /* do nothing */

#define MAX_PIPES 6

@ -3641,13 +3641,12 @@ enum dc_status dcn10_set_clock(struct dc *dc,
struct dc_clock_config clock_cfg = {0};
struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
context, clock_type, &clock_cfg);

if (!dc->clk_mgr->funcs->get_clock)
if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
return DC_FAIL_UNSUPPORTED_1;

dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
context, clock_type, &clock_cfg);

if (clk_khz > clock_cfg.max_clock_khz)
return DC_FAIL_CLK_EXCEED_MAX;

@ -3665,7 +3664,7 @@ enum dc_status dcn10_set_clock(struct dc *dc,
else
return DC_ERROR_UNEXPECTED;

if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
if (dc->clk_mgr->funcs->update_clocks)
dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
context, true);
return DC_OK;

@ -1723,13 +1723,15 @@ void dcn20_program_front_end_for_ctx(

pipe = pipe->bottom_pipe;
}
/* Program secondary blending tree and writeback pipes */
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
&& hws->funcs.program_all_writeback_pipes_in_tree)
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
/* Program secondary blending tree and writeback pipes */
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->top_pipe && !pipe->prev_odm_pipe
&& pipe->stream && pipe->stream->num_wb_info > 0
&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
|| pipe->stream->update_flags.raw)
&& hws->funcs.program_all_writeback_pipes_in_tree)
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}

@ -49,6 +49,11 @@
static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
struct dcn3_xfer_func_reg *reg)
{
reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;

reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
@ -66,8 +71,6 @@ static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
@ -147,18 +150,19 @@ static enum dc_lut_mode dwb3_get_ogam_current(
uint32_t state_mode;
uint32_t ram_select;

REG_GET(DWB_OGAM_CONTROL,
DWB_OGAM_MODE, &state_mode);
REG_GET(DWB_OGAM_CONTROL,
DWB_OGAM_SELECT, &ram_select);
REG_GET_2(DWB_OGAM_CONTROL,
DWB_OGAM_MODE_CURRENT, &state_mode,
DWB_OGAM_SELECT_CURRENT, &ram_select);

if (state_mode == 0) {
mode = LUT_BYPASS;
} else if (state_mode == 2) {
if (ram_select == 0)
mode = LUT_RAM_A;
else
else if (ram_select == 1)
mode = LUT_RAM_B;
else
mode = LUT_BYPASS;
} else {
// Reserved value
mode = LUT_BYPASS;
@ -172,10 +176,10 @@ static void dwb3_configure_ogam_lut(
struct dcn30_dwbc *dwbc30,
bool is_ram_a)
{
REG_UPDATE(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_READ_COLOR_SEL, 7);
REG_UPDATE(DWB_OGAM_CONTROL,
DWB_OGAM_SELECT, is_ram_a == true ? 0 : 1);
REG_UPDATE_2(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_WRITE_COLOR_MASK, 7,
DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 0 : 1);

REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
}

@ -185,17 +189,45 @@ static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
{
uint32_t i;

// triple base implementation
for (i = 0; i < num/2; i++) {
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg);
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg);
uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;

if (is_rgb_equal(rgb, num)) {
for (i = 0 ; i < num; i++)
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);

REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);

} else {

REG_UPDATE(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_WRITE_COLOR_MASK, 4);

for (i = 0 ; i < num; i++)
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);

REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);

REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);

REG_UPDATE(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_WRITE_COLOR_MASK, 2);

for (i = 0 ; i < num; i++)
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg);

REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green);

REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);

REG_UPDATE(DWB_OGAM_LUT_CONTROL,
DWB_OGAM_LUT_WRITE_COLOR_MASK, 1);

for (i = 0 ; i < num; i++)
REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg);

REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue);
}
}
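
The rewritten dwb3_program_ogam_pwl() appends one extra end point per channel, the base of the last segment plus its delta, and falls back to three masked single-channel passes when the R/G/B curves differ. A small sketch of the appended end-point computation, with stand-in types for the *_reg / delta_*_reg fields:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct pwl_point {
    uint32_t base, delta;   /* stand-ins for *_reg / delta_*_reg */
};

/* end point appended after the last programmed segment */
static uint32_t last_base_value(const struct pwl_point *pts, int num)
{
    return pts[num - 1].base + pts[num - 1].delta;
}

int main(void)
{
    struct pwl_point red[] = { {0, 16}, {16, 16}, {32, 31} };

    printf("appended end point: %" PRIu32 "\n",
           last_base_value(red, 3));    /* 32 + 31 = 63 */
    return 0;
}
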

@ -211,6 +243,8 @@ static bool dwb3_program_ogam_lut(
return false;
}

REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);

current_mode = dwb3_get_ogam_current(dwbc30);
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
next_mode = LUT_RAM_B;
@ -227,8 +261,7 @@ static bool dwb3_program_ogam_lut(
dwb3_program_ogam_pwl(
dwbc30, params->rgb_resulted, params->hw_points_num);

REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);

return true;
}
@ -271,14 +304,19 @@ static void dwb3_program_gamut_remap(

struct color_matrices_reg gam_regs;

REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);

if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
REG_SET(DWB_GAMUT_REMAP_MODE, 0,
DWB_GAMUT_REMAP_MODE, 0);
return;
}

REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);

gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11;
gam_regs.masks.csc_c11 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11;
gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12;
gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12;

switch (select) {
case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);

@ -398,12 +398,22 @@ void dcn30_program_all_writeback_pipes_in_tree(
for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];

if (!pipe_ctx->plane_state)
continue;

if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
break;
}
}
ASSERT(wb_info.mpcc_inst != -1);

if (wb_info.mpcc_inst == -1) {
/* Disable writeback pipe and disconnect from MPCC
* if source plane has been removed
*/
dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
continue;
}

ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];

@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
}
pri_pipe->next_odm_pipe = sec_pipe;
sec_pipe->prev_odm_pipe = pri_pipe;
ASSERT(sec_pipe->top_pipe == NULL);

if (!sec_pipe->top_pipe)
sec_pipe->stream_res.opp = pool->opps[pipe_idx];

@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}

static void calculate_wm_set_for_vlevel(
int vlevel,
struct wm_range_table_entry *table_entry,
struct dcn_watermarks *wm_set,
struct display_mode_lib *dml,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;

ASSERT(vlevel < dml->soc.num_states);
/* only pipe 0 is read for voltage and dcf/soc clocks */
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;

dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;

wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;

}

static void dcn301_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel_req)
{
int i, pipe_idx;
int vlevel, vlevel_max;
struct wm_range_table_entry *table_entry;
struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;

ASSERT(bw_params);

vlevel_max = bw_params->clk_table.num_entries - 1;

/* WM Set D */
table_entry = &bw_params->wm_table.entries[WM_D];
if (table_entry->wm_type == WM_TYPE_RETRAINING)
vlevel = 0;
else
vlevel = vlevel_max;
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
vlevel = min(max(vlevel_req, 2), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
vlevel = min(max(vlevel_req, 1), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);

/* WM Set A */
table_entry = &bw_params->wm_table.entries[WM_A];
vlevel = min(vlevel_req, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
&context->bw_ctx.dml, pipes, pipe_cnt);

for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;

pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);

if (dc->config.forced_clocks) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;

pipe_idx++;
}

dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
}

static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,

@ -407,6 +407,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
&pipe_ctx->stream_res.encoder_info_frame);
}
}
void dcn31_z10_save_init(struct dc *dc)
{
union dmub_rb_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;

dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}

void dcn31_z10_restore(struct dc *dc)
{

@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);

void dcn31_z10_restore(struct dc *dc);
void dcn31_z10_save_init(struct dc *dc);

void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);

@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,

@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
int width, int height, int offset);

void (*z10_restore)(struct dc *dc);
void (*z10_save_init)(struct dc *dc);

void (*update_visual_confirm_color)(struct dc *dc,
struct pipe_ctx *pipe_ctx,

@ -47,10 +47,10 @@

/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
#define DMUB_FW_VERSION_GIT_HASH 0x6d13d5e2c
#define DMUB_FW_VERSION_GIT_HASH 0x7383caadc
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
#define DMUB_FW_VERSION_REVISION 77
#define DMUB_FW_VERSION_REVISION 79
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@ -322,6 +322,10 @@ union dmub_fw_boot_status {
uint32_t mailbox_rdy : 1; /**< 1 if mailbox ready */
uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
uint32_t restore_required : 1; /**< 1 if driver should call restore */
uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
uint32_t reserved : 1;
uint32_t detection_required: 1; /**< if detection need to be triggered by driver */

} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@ -335,6 +339,7 @@ enum dmub_fw_boot_status_bit {
DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */
DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */
DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data is deferred programmed */
DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/
};

/* Register bit definition for SCRATCH5 */
@ -489,6 +494,11 @@ enum dmub_gpint_command {
* RETURN: PSR residency in milli-percent.
*/
DMUB_GPINT__PSR_RESIDENCY = 9,

/**
* DESC: Notifies DMCUB detection is done so detection required can be cleared.
*/
DMUB_GPINT__NOTIFY_DETECTION_DONE = 12,
};

/**
@ -860,6 +870,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware restore.
*/
DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,

/**
* DCN hardware save.
*/
DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
};

/**
@ -1438,7 +1453,7 @@ struct dmub_cmd_psr_set_level_data {
* 16-bit value dicated by driver that will enable/disable different functionality.
*/
uint16_t psr_level;
/**
/**
* PSR control version.
*/
uint8_t cmd_version;

@ -83,7 +83,7 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn31_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
const uint32_t timeout = 30;
const uint32_t timeout = 100;
uint32_t in_reset, scratch, i;

REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@ -98,26 +98,22 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
/**
* Timeout covers both the ACK and the wait
* for remaining work to finish.
*
* This is mostly bound by the PHY disable sequence.
* Each register check will be greater than 1us, so
* don't bother using udelay.
*/

for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;

udelay(1);
}

for (i = 0; i < timeout; ++i) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
}

/* Clear the GPINT command manually so we don't reset again. */
cmd.all = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
udelay(1);
}

/* Force reset in case we timed out, DMCUB is likely hung. */
}
@ -130,6 +126,10 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);

/* Clear the GPINT command manually so we don't send anything during boot. */
cmd.all = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
}
||||
|
||||
void dmub_dcn31_reset_release(struct dmub_srv *dmub)
|
||||
|
@ -145,6 +145,7 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
|
||||
} else {
|
||||
callback_in_ms(0, output);
|
||||
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
|
||||
set_auth_complete(hdcp, output);
|
||||
}
|
||||
else if (is_hdmi_dvi_sl_hdcp(hdcp))
|
||||
if (is_cp_desired_hdcp2(hdcp)) {
|
||||
@ -156,10 +157,12 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
|
||||
} else {
|
||||
callback_in_ms(0, output);
|
||||
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
|
||||
set_auth_complete(hdcp, output);
|
||||
}
|
||||
else {
|
||||
callback_in_ms(0, output);
|
||||
set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
|
||||
set_auth_complete(hdcp, output);
|
||||
}
|
||||
} else if (is_in_cp_not_desired_state(hdcp)) {
|
||||
increment_stay_counter(hdcp);
|
||||
@ -520,7 +523,7 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
|
||||
|
||||
/* reset authentication if needed */
|
||||
if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
|
||||
HDCP_FULL_DDC_TRACE(hdcp);
|
||||
mod_hdcp_log_ddc_trace(hdcp);
|
||||
reset_status = reset_authentication(hdcp, output);
|
||||
if (reset_status != MOD_HDCP_STATUS_SUCCESS)
|
||||
push_error_status(hdcp, reset_status);
|
||||
|
@ -324,6 +324,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
|
||||
/* log functions */
|
||||
void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
|
||||
uint8_t *buf, uint32_t buf_size);
|
||||
void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp);
|
||||
/* TODO: add adjustment log */
|
||||
|
||||
/* psp functions */
|
||||
@ -494,6 +495,13 @@ static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
|
||||
output->watchdog_timer_delay = time;
|
||||
}
|
||||
|
||||
static inline void set_auth_complete(struct mod_hdcp *hdcp,
|
||||
struct mod_hdcp_output *output)
|
||||
{
|
||||
output->auth_complete = 1;
|
||||
mod_hdcp_log_ddc_trace(hdcp);
|
||||
}
|
||||
|
||||
/* connection topology helpers */
|
||||
static inline uint8_t is_display_active(struct mod_hdcp_display *display)
|
||||
{
|
||||
|
@ -89,7 +89,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
} else {
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
HDCP_FULL_DDC_TRACE(hdcp);
set_auth_complete(hdcp, output);
}
break;
case H1_A45_AUTHENTICATED:

@ -137,7 +137,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
HDCP_FULL_DDC_TRACE(hdcp);
set_auth_complete(hdcp, output);
break;
default:
status = MOD_HDCP_STATUS_INVALID_STATE;

@ -239,7 +239,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
} else {
set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
HDCP_FULL_DDC_TRACE(hdcp);
set_auth_complete(hdcp, output);
}
break;
case D1_A4_AUTHENTICATED:

@ -311,7 +311,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
break;
}
set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
HDCP_FULL_DDC_TRACE(hdcp);
set_auth_complete(hdcp, output);
break;
default:
fail_and_restart_in_ms(0, &status, output);

@ -242,7 +242,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
}
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A5_AUTHENTICATED);
HDCP_FULL_DDC_TRACE(hdcp);
set_auth_complete(hdcp, output);
break;
case H2_A5_AUTHENTICATED:
if (input->rxstatus_read == FAIL ||

@ -559,7 +559,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
break;
}
set_state_id(hdcp, output, D2_A5_AUTHENTICATED);
HDCP_FULL_DDC_TRACE(hdcp);
set_auth_complete(hdcp, output);
break;
case D2_A5_AUTHENTICATED:
if (input->rxstatus_read == FAIL ||
@ -51,6 +51,80 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
}
}

void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp)
{
if (is_hdcp1(hdcp)) {
HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv,
sizeof(hdcp->auth.msg.hdcp1.bksv));
HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps,
sizeof(hdcp->auth.msg.hdcp1.bcaps));
HDCP_DDC_READ_TRACE(hdcp, "BSTATUS",
(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
sizeof(hdcp->auth.msg.hdcp1.bstatus));
HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an,
sizeof(hdcp->auth.msg.hdcp1.an));
HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv,
sizeof(hdcp->auth.msg.hdcp1.aksv));
HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo,
sizeof(hdcp->auth.msg.hdcp1.ainfo));
HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'",
(uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
sizeof(hdcp->auth.msg.hdcp1.r0p));
HDCP_DDC_READ_TRACE(hdcp, "BINFO",
(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist,
hdcp->auth.msg.hdcp1.ksvlist_size);
HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp,
sizeof(hdcp->auth.msg.hdcp1.vp));
} else if (is_hdcp2(hdcp)) {
HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version",
&hdcp->auth.msg.hdcp2.hdcp2version_hdmi,
sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi));
HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp,
sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp));
HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init,
sizeof(hdcp->auth.msg.hdcp2.ake_init));
HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert,
sizeof(hdcp->auth.msg.hdcp2.ake_cert));
HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM",
hdcp->auth.msg.hdcp2.ake_stored_km,
sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM",
hdcp->auth.msg.hdcp2.ake_no_stored_km,
sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
HDCP_DDC_READ_TRACE(hdcp, "Pairing Info",
hdcp->auth.msg.hdcp2.ake_pairing_info,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init,
sizeof(hdcp->auth.msg.hdcp2.lc_init));
HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks,
sizeof(hdcp->auth.msg.hdcp2.ske_eks));
HDCP_DDC_READ_TRACE(hdcp, "Rx Status",
(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus,
sizeof(hdcp->auth.msg.hdcp2.rxstatus));
HDCP_DDC_READ_TRACE(hdcp, "Rx Id List",
hdcp->auth.msg.hdcp2.rx_id_list,
hdcp->auth.msg.hdcp2.rx_id_list_size);
HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack",
hdcp->auth.msg.hdcp2.repeater_auth_ack,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management",
hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
hdcp->auth.msg.hdcp2.stream_manage_size);
HDCP_DDC_READ_TRACE(hdcp, "Stream Ready",
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type",
hdcp->auth.msg.hdcp2.content_stream_type_dp,
sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
}
}

char *mod_hdcp_status_to_str(int32_t status)
{
switch (status) {
@ -106,78 +106,6 @@
hdcp->config.index, msg_name,\
hdcp->buf); \
} while (0)
#define HDCP_FULL_DDC_TRACE(hdcp) do { \
if (is_hdcp1(hdcp)) { \
HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
sizeof(hdcp->auth.msg.hdcp1.bksv)); \
HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
sizeof(hdcp->auth.msg.hdcp1.an)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
sizeof(hdcp->auth.msg.hdcp1.aksv)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
(uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
sizeof(hdcp->auth.msg.hdcp1.r0p)); \
HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
hdcp->auth.msg.hdcp1.ksvlist_size); \
HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
sizeof(hdcp->auth.msg.hdcp1.vp)); \
} else { \
HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \
&hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \
sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \
HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \
sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \
sizeof(hdcp->auth.msg.hdcp2.ake_init)); \
HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \
sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \
HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \
hdcp->auth.msg.hdcp2.ake_stored_km, \
sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \
HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \
hdcp->auth.msg.hdcp2.ake_no_stored_km, \
sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \
HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \
HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \
hdcp->auth.msg.hdcp2.ake_pairing_info, \
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \
HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \
sizeof(hdcp->auth.msg.hdcp2.lc_init)); \
HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \
HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \
sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \
HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \
(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \
sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \
HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \
hdcp->auth.msg.hdcp2.rx_id_list, \
hdcp->auth.msg.hdcp2.rx_id_list_size); \
HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \
hdcp->auth.msg.hdcp2.repeater_auth_ack, \
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \
HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \
hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \
hdcp->auth.msg.hdcp2.stream_manage_size); \
HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \
HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \
hdcp->auth.msg.hdcp2.content_stream_type_dp, \
sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \
} \
} while (0)
#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
hdcp->config.index, i)
@ -54,7 +54,7 @@ static enum mod_hdcp_status remove_display_from_topology_v2(
get_active_display_at_index(hdcp, index);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;

if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;

@ -90,7 +90,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
get_active_display_at_index(hdcp, index);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;

if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;

@ -128,13 +128,13 @@ static enum mod_hdcp_status add_display_to_topology_v2(
struct mod_hdcp_link *link = &hdcp->connection.link;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

if (!psp->dtm_context.dtm_initialized) {
if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}

dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;

mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));

@ -175,13 +175,13 @@ static enum mod_hdcp_status add_display_to_topology_v3(
struct mod_hdcp_link *link = &hdcp->connection.link;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

if (!psp->dtm_context.dtm_initialized) {
if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}

dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;

mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));

@ -253,12 +253,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

if (!psp->hdcp_context.hdcp_initialized) {
if (!psp->hdcp_context.context.initialized) {
DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
return MOD_HDCP_STATUS_FAILURE;
}

hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;

mutex_lock(&psp->hdcp_context.mutex);
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

@ -293,7 +293,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;

@ -325,7 +325,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;

@ -367,7 +367,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;

@ -393,7 +393,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;

@ -436,7 +436,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;

for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {

@ -471,7 +471,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;

memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

@ -498,7 +498,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

if (!psp->hdcp_context.hdcp_initialized) {
if (!psp->hdcp_context.context.initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
return MOD_HDCP_STATUS_FAILURE;
}

@ -508,7 +508,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)

mutex_lock(&psp->hdcp_context.mutex);

hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;

@ -545,7 +545,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id;

@ -579,7 +579,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -611,7 +611,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -671,7 +671,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -717,7 +717,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -750,7 +750,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -785,7 +785,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -833,7 +833,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)

mutex_lock(&psp->hdcp_context.mutex);

hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;

@ -862,7 +862,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)

mutex_lock(&psp->hdcp_context.mutex);

hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -914,7 +914,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -958,7 +958,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;

@ -994,7 +994,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));

msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
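Every wrapper in this file now follows the same generic-TA-context shape. Condensed into one sketch, using only field names that appear in the hunks above (the per-TA *_shared_buf and *_initialized members are replaced by the generic context embedded in hdcp_context/dtm_context):

/* Repeated pattern after the PSP TA refactor. */
struct ta_hdcp_shared_memory *hdcp_cmd;

if (!psp->hdcp_context.context.initialized)
	return MOD_HDCP_STATUS_FAILURE;

mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
/* ... fill hdcp_cmd->in_msg, invoke the TA, read hdcp_cmd->out_msg ... */
mutex_unlock(&psp->hdcp_context.mutex);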
@ -225,6 +225,7 @@ struct mod_hdcp_output {
uint8_t watchdog_timer_stop;
uint16_t callback_delay;
uint16_t watchdog_timer_delay;
uint8_t auth_complete;
};

/* used to represent per display info */
@ -38,6 +38,9 @@
#define mmCG_TACH_CTRL 0x006a
#define mmCG_TACH_CTRL_BASE_IDX 0

#define mmCG_TACH_STATUS 0x006b
#define mmCG_TACH_STATUS_BASE_IDX 0

#define mmTHM_THERMAL_INT_ENA 0x000a
#define mmTHM_THERMAL_INT_ENA_BASE_IDX 0
#define mmTHM_THERMAL_INT_CTRL 0x000b

@ -49,4 +52,7 @@
#define mmTHM_BACO_CNTL 0x0081
#define mmTHM_BACO_CNTL_BASE_IDX 0

#define mmCG_THERMAL_STATUS 0x006C
#define mmCG_THERMAL_STATUS_BASE_IDX 0

#endif

@ -92,5 +92,8 @@
#define THM_TCON_THERM_TRIP__RSVD3_MASK 0x7FFFC000L
#define THM_TCON_THERM_TRIP__SW_THERM_TP_MASK 0x80000000L

#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9
#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x0001FE00L

#endif
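The new CG_THERMAL_STATUS definitions let the driver read back the fan duty the SMC is actually driving. A hedged sketch of the decode; only the mask and shift come from this patch, and the register-read accessor is an assumption:

/* Assumption: any 32-bit read of mmCG_THERMAL_STATUS works here. */
uint32_t thermal_status = RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS);
uint32_t current_duty = (thermal_status & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
			>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;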
@ -44,6 +44,7 @@ struct kgd_mem;
enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE
};

struct kfd_vm_fault_info {

@ -298,6 +299,8 @@ struct kfd2kgd_calls {

void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt,
int *max_waves_per_cu);
void (*program_trap_handler_settings)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
};

#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
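A hedged sketch of how KFD can drive the new hook through the kfd2kgd dispatch table; the surrounding device/queue structures (dev, qpd) are assumptions, not part of this hunk:

/* Hypothetical call site: program the trap handler base (TBA) and
 * trap memory (TMA) addresses for a VMID when the ASIC provides the hook.
 */
if (dev->kfd2kgd->program_trap_handler_settings)
	dev->kfd2kgd->program_trap_handler_settings(dev->kgd,
			qpd->vmid, qpd->tba_addr, qpd->tma_addr);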
@ -306,8 +306,8 @@ struct amd_pm_funcs {
/* export for sysfs */
void (*set_fan_control_mode)(void *handle, u32 mode);
u32 (*get_fan_control_mode)(void *handle);
int (*set_fan_speed_percent)(void *handle, u32 speed);
int (*get_fan_speed_percent)(void *handle, u32 *speed);
int (*set_fan_speed_pwm)(void *handle, u32 speed);
int (*get_fan_speed_pwm)(void *handle, u32 *speed);
int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
@ -2094,14 +2094,19 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
}

if (asic_type == CHIP_ARCTURUS) {
/* Arcturus does not support standalone mclk/socclk/fclk level setting */
switch (asic_type) {
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
DEVICE_ATTR_IS(pp_dpm_socclk) ||
DEVICE_ATTR_IS(pp_dpm_fclk)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
break;
default:
break;
}

if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {

@ -2379,7 +2384,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

return sprintf(buf, "%u\n", pwm_mode);
return sysfs_emit(buf, "%u\n", pwm_mode);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,

@ -2424,14 +2429,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", 0);
return sysfs_emit(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", 255);
return sysfs_emit(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,

@ -2469,10 +2474,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
return err;
}

value = (value * 100) / 255;

if (adev->powerplay.pp_funcs->set_fan_speed_percent)
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
else
err = -EINVAL;

@ -2504,8 +2507,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return err;
}

if (adev->powerplay.pp_funcs->get_fan_speed_percent)
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
else
err = -EINVAL;

@ -2515,9 +2518,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
if (err)
return err;

speed = (speed * 255) / 100;

return sprintf(buf, "%i\n", speed);
return sysfs_emit(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,

@ -2550,7 +2551,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
if (err)
return err;

return sprintf(buf, "%i\n", speed);
return sysfs_emit(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,

@ -2647,7 +2648,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
if (err)
return err;

return sprintf(buf, "%i\n", rpm);
return sysfs_emit(buf, "%i\n", rpm);
}

static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,

@ -2729,7 +2730,7 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}

static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,

@ -2899,7 +2900,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", 0);
return sysfs_emit(buf, "%i\n", 0);
}

@ -3174,6 +3175,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable
*
* NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
* That will get the former one overridden.
*
* hwmon interfaces for GPU clocks:
*
* - freq1_input: the gfx/compute clock in hertz

@ -3349,13 +3353,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,

if (!is_support_sw_smu(adev)) {
/* mask fan attributes if we have no bindings for this asic to expose */
if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
(!adev->powerplay.pp_funcs->get_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO;

if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
(!adev->powerplay.pp_funcs->set_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */

@ -3379,8 +3383,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,

if (!is_support_sw_smu(adev)) {
/* hide max/min values if we can't both query and manage the fan */
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
!adev->powerplay.pp_funcs->get_fan_speed_percent) &&
if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
!adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
(!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
!adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
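All of the sprintf() conversions above follow one pattern, sketched generically below: sysfs_emit() starts a fresh, PAGE_SIZE-bounded write into the sysfs buffer and sysfs_emit_at() appends at an offset, where plain sprintf() had no bounds checking. This is an illustration, not a function from this patch:

/* Generic show() callback shape after the conversion. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int size = sysfs_emit(buf, "%s:\n", "OD_SCLK");

	size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 300u);
	size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 1200u);
	return size;
}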
@ -280,11 +280,11 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_get_fan_control_mode(adev) \
((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))

#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_set_fan_speed_pwm(adev, s) \
((adev)->powerplay.pp_funcs->set_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))

#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_pwm(adev, s) \
((adev)->powerplay.pp_funcs->get_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))

#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
@ -34,6 +34,8 @@
#define SMU_FW_NAME_LEN 0x24

#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
#define SMU_CUSTOM_FAN_SPEED_RPM (1 << 1)
#define SMU_CUSTOM_FAN_SPEED_PWM (1 << 2)

// Power Throttlers
#define SMU_THROTTLER_PPT0_BIT 0

@ -229,7 +231,8 @@ enum smu_memory_pool_size
struct smu_user_dpm_profile {
uint32_t fan_mode;
uint32_t power_limit;
uint32_t fan_speed_percent;
uint32_t fan_speed_pwm;
uint32_t fan_speed_rpm;
uint32_t flags;
uint32_t user_od;

@ -540,7 +543,7 @@ struct smu_context
struct work_struct interrupt_work;

unsigned fan_max_rpm;
unsigned manual_fan_speed_percent;
unsigned manual_fan_speed_pwm;

uint32_t gfx_default_hard_min_freq;
uint32_t gfx_default_soft_max_freq;

@ -722,9 +725,14 @@ struct pptable_funcs {
bool (*is_dpm_running)(struct smu_context *smu);

/**
* @get_fan_speed_percent: Get the current fan speed in percent.
* @get_fan_speed_pwm: Get the current fan speed in PWM.
*/
int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed);

/**
* @get_fan_speed_rpm: Get the current fan speed in rpm.
*/
int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);

/**
* @set_watermarks_table: Configure and upload the watermarks tables to

@ -1043,9 +1051,14 @@ struct pptable_funcs {
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);

/**
* @set_fan_speed_percent: Set a static fan speed in percent.
* @set_fan_speed_pwm: Set a static fan speed in PWM.
*/
int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);

/**
* @set_fan_speed_rpm: Set a static fan speed in rpm.
*/
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);

/**
* @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
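The new SMU_CUSTOM_FAN_SPEED_PWM/RPM bits plus the split fan_speed_pwm/fan_speed_rpm fields let the driver remember which fan interface the user last programmed. A hedged sketch of a restore path built from names declared in this diff; the actual restore function and the user_dpm_profile member of smu_context are assumptions, as they are not shown here:

/* Restore whichever manual fan setting the user last programmed. */
if (smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
	smu_v11_0_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
else if (smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
	smu_v11_0_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);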
@ -278,9 +278,9 @@ struct pp_hwmgr_func {
int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent);
int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed);
int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent);
int (*set_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t speed);
int (*get_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t *speed);
int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t speed);
int (*get_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t *speed);
int (*reset_fan_speed_to_default)(struct pp_hwmgr *hwmgr);
int (*uninitialize_thermal_controller)(struct pp_hwmgr *hwmgr);
@ -298,7 +298,6 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(DS_FCLK), \
__SMU_DUMMY_MAP(DS_MP1CLK), \
__SMU_DUMMY_MAP(DS_MP0CLK), \
__SMU_DUMMY_MAP(XGMI), \
__SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN), \
__SMU_DUMMY_MAP(DPM_GFX_PACE), \
__SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \
@ -221,9 +221,18 @@ int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);

int smu_v11_0_set_fan_speed_percent(struct smu_context *smu,
int smu_v11_0_set_fan_speed_pwm(struct smu_context *smu,
uint32_t speed);

int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed);

int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
uint32_t *speed);

int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
uint32_t *speed);

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate);
@ -533,7 +533,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
return mode;
}

static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;

@ -541,17 +541,17 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;

if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}

static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;

@ -559,13 +559,13 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;

if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}

mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}

@ -1691,8 +1691,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.dispatch_tasks = pp_dpm_dispatch_tasks,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
.get_pp_num_states = pp_dpm_get_pp_num_states,
@ -1036,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
i = 1;

size += sprintf(buf + size, "0: %uMhz %s\n",
size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
data->gfx_min_freq_limit/100,
i == 0 ? "*" : "");
size += sprintf(buf + size, "1: %uMhz %s\n",
size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
size += sprintf(buf + size, "2: %uMhz %s\n",
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
data->gfx_max_freq_limit/100,
i == 2 ? "*" : "");
break;

@ -1050,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);

for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i,
mclk_table->entries[i].clk / 100,
((mclk_table->entries[i].clk / 100)

@ -1065,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;

size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
size += sprintf(buf + size, "1: %10uMhz\n",
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;

@ -1081,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;

size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
min_freq, max_freq);
}
break;

@ -1456,11 +1456,11 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;

size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
title[1], title[2], title[3], title[4], title[5]);

for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
@ -3212,7 +3212,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,

if (!ret) {
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
}

@ -4896,8 +4896,8 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
int i, now, size = 0;
uint32_t clock, pcie_speed;
int size = 0;
uint32_t i, now, clock, pcie_speed;

switch (type) {
case PP_SCLK:

@ -4911,7 +4911,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;

for (i = 0; i < sclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;

@ -4926,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;

for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;

@ -4940,7 +4940,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;

for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",

@ -4948,32 +4948,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
size = sprintf(buf, "%s:\n", "OD_SCLK");
size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
for (i = 0; i < odn_sclk_table->num_of_pl; i++)
size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
i, odn_sclk_table->entries[i].clock/100,
odn_sclk_table->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
size = sprintf(buf, "%s:\n", "OD_MCLK");
size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
for (i = 0; i < odn_mclk_table->num_of_pl; i++)
size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
i, odn_mclk_table->entries[i].clock/100,
odn_mclk_table->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}

@ -4988,7 +4988,7 @@ static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
switch (mode) {
case AMD_FAN_CTRL_NONE:
smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
break;
case AMD_FAN_CTRL_MANUAL:
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,

@ -5503,7 +5503,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
if (!buf)
return -EINVAL;

size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
title[0], title[1], title[2], title[3],
title[4], title[5], title[6], title[7]);

@ -5511,7 +5511,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)

for (i = 0; i < len; i++) {
if (i == hwmgr->power_profile_mode) {
size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
i, profile_name[i], "*",
data->current_profile_setting.sclk_up_hyst,
data->current_profile_setting.sclk_down_hyst,

@ -5522,21 +5522,21 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
continue;
}
if (smu7_profiling[i].bupdate_sclk)
size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
smu7_profiling[i].sclk_down_hyst,
smu7_profiling[i].sclk_activity);
else
size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
i, profile_name[i], "-", "-", "-");

if (smu7_profiling[i].bupdate_mclk)
size += sprintf(buf + size, "%16d %16d %16d\n",
size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
smu7_profiling[i].mclk_up_hyst,
smu7_profiling[i].mclk_down_hyst,
smu7_profiling[i].mclk_activity);
else
size += sprintf(buf + size, "%16s %16s %16s\n",
size += sysfs_emit_at(buf, size, "%16s %16s %16s\n",
"-", "-", "-");
}

@ -5692,8 +5692,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
.get_fan_speed_pwm = smu7_fan_ctrl_get_fan_speed_pwm,
.set_fan_speed_pwm = smu7_fan_ctrl_set_fan_speed_pwm,
.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,

@ -51,7 +51,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
return 0;
}

int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
int smu7_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
uint32_t duty100;

@ -70,12 +70,9 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
return -EINVAL;

tmp64 = (uint64_t)duty * 100;
tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
*speed = (uint32_t)tmp64;

if (*speed > 100)
*speed = 100;
*speed = MIN((uint32_t)tmp64, 255);

return 0;
}

@ -199,12 +196,11 @@ int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
}

/**
* smu7_fan_ctrl_set_fan_speed_percent - Set Fan Speed in percent.
* smu7_fan_ctrl_set_fan_speed_pwm - Set Fan Speed in PWM.
* @hwmgr: the address of the powerplay hardware manager.
* @speed: is the percentage value (0% - 100%) to be set.
* Exception: Fails is the 100% setting appears to be 0.
* @speed: is the pwm value (0 - 255) to be set.
*/
int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
int smu7_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
uint32_t duty100;

@ -214,8 +210,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;

if (speed > 100)
speed = 100;
speed = MIN(speed, 255);

if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);

@ -227,7 +222,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
return -EINVAL;

tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 100);
do_div(tmp64, 255);
duty = (uint32_t)tmp64;

PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
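The duty-cycle math above is the core of the percent-to-PWM migration: the hardware duty is now scaled against 255 instead of 100 in both directions. Restated as self-contained illustrative helpers (do_div() and MIN() are used exactly as in the hunks above; duty100 is the register value representing 100% duty):

/* hardware duty -> 0-255 pwm value reported to hwmon */
static uint32_t smu7_duty_to_pwm(uint32_t duty, uint32_t duty100)
{
	uint64_t tmp64 = (uint64_t)duty * 255;

	do_div(tmp64, duty100);
	return MIN((uint32_t)tmp64, 255);
}

/* 0-255 pwm value from hwmon -> hardware duty */
static uint32_t smu7_pwm_to_duty(uint32_t speed, uint32_t duty100)
{
	uint64_t tmp64 = (uint64_t)MIN(speed, 255) * duty100;

	do_div(tmp64, 255);
	return (uint32_t)tmp64;
}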
@ -41,10 +41,10 @@
extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int smu7_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
@ -1547,7 +1547,8 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *sclk_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
int i, now, size = 0;
uint32_t i, now;
int size = 0;

switch (type) {
case PP_SCLK:

@ -1558,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_SCLK_INDEX);

for (i = 0; i < sclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
i, sclk_table->entries[i].clk / 100,
(i == now) ? "*" : "");
break;

@ -1570,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_MCLK_INDEX);

for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
size += sprintf(buf + size, "%d: %uMhz %s\n",
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
break;
@ -4199,7 +4199,7 @@ static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
|
||||
|
||||
switch (mode) {
|
||||
case AMD_FAN_CTRL_NONE:
|
||||
vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
|
||||
vega10_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
|
||||
break;
|
||||
case AMD_FAN_CTRL_MANUAL:
|
||||
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
|
||||
@ -4553,13 +4553,13 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
|
||||
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
|
||||
return ret);
|
||||
|
||||
size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
|
||||
size += sprintf(buf + size, "%-19s %-22s %s\n",
|
||||
size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
|
||||
size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
|
||||
output_title[0],
|
||||
output_title[1],
|
||||
output_title[2]);
|
||||
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
|
||||
size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
|
||||
size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
|
||||
ppfeature_name[i],
|
||||
1ULL << i,
|
||||
(features_enabled & (1ULL << i)) ? "Y" : "N");
|
||||
@ -4650,7 +4650,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
	else
		count = sclk_table->count;
	for (i = 0; i < count; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
		size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
				i, sclk_table->dpm_levels[i].value / 100,
				(i == now) ? "*" : "");
	break;
@ -4661,7 +4661,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);

	for (i = 0; i < mclk_table->count; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
		size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
				i, mclk_table->dpm_levels[i].value / 100,
				(i == now) ? "*" : "");
	break;
@ -4672,7 +4672,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);

	for (i = 0; i < soc_table->count; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
		size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
				i, soc_table->dpm_levels[i].value / 100,
				(i == now) ? "*" : "");
	break;
@ -4684,7 +4684,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
			PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);

	for (i = 0; i < dcef_table->count; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
		size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
				i, dcef_table->dpm_levels[i].value / 100,
				(dcef_table->dpm_levels[i].value / 100 == now) ?
				"*" : "");
@ -4698,7 +4698,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
		gen_speed = pptable->PcieGenSpeed[i];
		lane_width = pptable->PcieLaneCount[i];

		size += sprintf(buf + size, "%d: %s %s %s\n", i,
		size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
				(gen_speed == 0) ? "2.5GT/s," :
				(gen_speed == 1) ? "5.0GT/s," :
				(gen_speed == 2) ? "8.0GT/s," :
@ -4717,34 +4717,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,

	case OD_SCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");
			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
			for (i = 0; i < podn_vdd_dep->count; i++)
				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
				size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
					i, podn_vdd_dep->entries[i].clk / 100,
					podn_vdd_dep->entries[i].vddc);
		}
		break;
	case OD_MCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_MCLK");
			size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
			for (i = 0; i < podn_vdd_dep->count; i++)
				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
				size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
					i, podn_vdd_dep->entries[i].clk/100,
					podn_vdd_dep->entries[i].vddc);
		}
		break;
	case OD_RANGE:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_RANGE");
			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
			size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
			size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
			size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
				data->odn_dpm_table.min_vddc,
				data->odn_dpm_table.max_vddc);
		}
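The OD_* cases above also show the companion helper: when formatting starts at the beginning of the buffer, plain sysfs_emit() (which always writes at offset 0) replaces sprintf(buf, ...), and only the appended lines go through sysfs_emit_at(). A hedged sketch of that pairing, with made-up clock values and a hypothetical attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch only: first line via sysfs_emit(), appends via sysfs_emit_at(). */
static ssize_t od_range_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int size;

	size = sysfs_emit(buf, "%s:\n", "OD_RANGE");	/* writes at offset 0 */
	size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", 852u, 2000u);
	size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", 167u, 1500u);
	return size;
}
static DEVICE_ATTR_RO(od_range);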
@ -5112,21 +5112,28 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
	size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
			title[1], title[2], title[3], title[4], title[5]);

	for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
		size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			profile_mode_setting[i][0], profile_mode_setting[i][1],
			profile_mode_setting[i][2], profile_mode_setting[i][3]);
	size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
	size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
			profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			data->custom_profile_mode[0], data->custom_profile_mode[1],
			data->custom_profile_mode[2], data->custom_profile_mode[3]);
	return size;
}

static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return (adev->pdev->device == 0x6860);
}

static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct vega10_hwmgr *data = hwmgr->backend;
@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
	}

out:
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
	if (vega10_get_power_profile_mode_quirks(hwmgr))
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
						1 << power_profile_mode,
						NULL);
	else
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
						(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
						NULL);

	hwmgr->power_profile_mode = power_profile_mode;

	return 0;
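The new quirk above changes how a profile index is encoded into the SMU workload mask: device 0x6860 keeps the old 1 << mode encoding, while all other parts now shift by one so that mode 0 sends an empty mask. A standalone sketch of the two mappings (plain C, SMU messaging omitted):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the two workload-mask encodings chosen in the hunk above. */
static uint32_t workload_mask(uint32_t mode, int quirked)
{
	if (quirked)			/* e.g. device ID 0x6860 */
		return 1u << mode;
	return !mode ? 0 : 1u << (mode - 1);
}

int main(void)
{
	for (uint32_t mode = 0; mode < 4; mode++)
		printf("mode %u: quirked mask 0x%x, default mask 0x%x\n",
		       mode, workload_mask(mode, 1), workload_mask(mode, 0));
	return 0;
}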
@ -5523,8 +5536,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	.force_dpm_level = vega10_dpm_force_dpm_level,
	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
	.get_fan_speed_pwm = vega10_fan_ctrl_get_fan_speed_pwm,
	.set_fan_speed_pwm = vega10_fan_ctrl_set_fan_speed_pwm,
	.reset_fan_speed_to_default =
			vega10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,

@ -64,7 +64,7 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
	return 0;
}

int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
		uint32_t *speed)
{
	uint32_t current_rpm;
@ -78,11 +78,11 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,

	if (hwmgr->thermal_controller.
			advanceFanControlParameters.usMaxFanRPM != 0)
		percent = current_rpm * 100 /
		percent = current_rpm * 255 /
			hwmgr->thermal_controller.
				advanceFanControlParameters.usMaxFanRPM;

	*speed = percent > 100 ? 100 : percent;
	*speed = MIN(percent, 255);

	return 0;
}
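This is the heart of the PWM rework: the getter now reports fan speed on the hwmon pwm1 scale of 0-255 rather than 0-100 percent, scaling the measured RPM against usMaxFanRPM and clamping with the MIN() macro added to amdgpu.h earlier in this series. A quick standalone check of the arithmetic (RPM figures are made up):

#include <stdint.h>
#include <stdio.h>

#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))

int main(void)
{
	uint32_t max_rpm = 3200;	/* hypothetical usMaxFanRPM */
	uint32_t current_rpm = 1600;	/* hypothetical tach reading */

	/* Old code scaled to 0-100; the new code scales to 0-255. */
	uint32_t pwm = current_rpm * 255 / max_rpm;

	printf("pwm = %u\n", MIN(pwm, 255));	/* -> 127, about half scale */
	return 0;
}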
@ -241,12 +241,11 @@ int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
}

/**
 * vega10_fan_ctrl_set_fan_speed_percent - Set Fan Speed in percent.
 * vega10_fan_ctrl_set_fan_speed_pwm - Set Fan Speed in PWM.
 * @hwmgr: the address of the powerplay hardware manager.
 * @speed: is the percentage value (0% - 100%) to be set.
 * Exception: Fails is the 100% setting appears to be 0.
 * @speed: is the PWM value (0 - 255) to be set.
 */
int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
		uint32_t speed)
{
	struct amdgpu_device *adev = hwmgr->adev;
@ -257,8 +256,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
	if (hwmgr->thermal_controller.fanInfo.bNoFan)
		return 0;

	if (speed > 100)
		speed = 100;
	speed = MIN(speed, 255);

	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
		vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
@ -270,7 +268,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
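On the write side, the requested value is rescaled against duty100, the register duty that corresponds to 100% fan, so the do_div() divisor moves from 100 to 255 to match the new input range: duty = speed * duty100 / 255. A userspace sketch of the same math (do_div() is the kernel's 64-by-32 divide helper; plain division stands in for it here, and duty100 is a made-up value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 200;	/* hypothetical CG_FDO_CTRL1 full-scale duty */
	uint32_t speed = 128;	/* requested PWM, 0-255 */

	/* Kernel code does: tmp64 = speed * duty100; do_div(tmp64, 255); */
	uint64_t tmp64 = (uint64_t)speed * duty100;
	uint32_t duty = (uint32_t)(tmp64 / 255);

	printf("duty = %u of %u\n", duty, duty100);	/* -> 100 of 200 */
	return 0;
}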
@ -54,12 +54,12 @@ extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
		struct phm_fan_speed_info *fan_speed_info);
extern int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
extern int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
		uint32_t *speed);
extern int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr,
		uint32_t mode);
extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
extern int vega10_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
		uint32_t speed);
extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_ctrl_uninitialize_thermal_controller(

@ -2146,13 +2146,13 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
		return ret);

	size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
	size += sprintf(buf + size, "%-19s %-22s %s\n",
	size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
	size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
				output_title[0],
				output_title[1],
				output_title[2]);
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
		size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
				ppfeature_name[i],
				1ULL << i,
				(features_enabled & (1ULL << i)) ? "Y" : "N");
@ -2256,7 +2256,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
			"Attempt to get gfx clk levels Failed!",
			return -1);
	for (i = 0; i < clocks.num_levels; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
		size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
			i, clocks.data[i].clocks_in_khz / 1000,
			(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
	break;
@ -2272,7 +2272,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
			"Attempt to get memory clk levels Failed!",
			return -1);
	for (i = 0; i < clocks.num_levels; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
		size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
			i, clocks.data[i].clocks_in_khz / 1000,
			(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
	break;
@ -2290,7 +2290,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
			"Attempt to get soc clk levels Failed!",
			return -1);
	for (i = 0; i < clocks.num_levels; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
		size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
			i, clocks.data[i].clocks_in_khz / 1000,
			(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
	break;
@ -2308,7 +2308,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
			"Attempt to get dcef clk levels Failed!",
			return -1);
	for (i = 0; i < clocks.num_levels; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
		size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
			i, clocks.data[i].clocks_in_khz / 1000,
			(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
	break;
Some files were not shown because too many files have changed in this diff.