drm fixes for 6.9-rc2
bridge: - select DRM_KMS_HELPER dma-buf: - fix NULL-pointer deref dp: - fix div-by-zero in DP MST unplug code fbdev: - select FB_IOMEM_FOPS for SBus sched: - fix NULL-pointer deref xe: - Fix build on mips - Fix wrong bound checks - Fix use of msec rather than jiffies - Remove dead code amdgpu: - SMU 14.0.1 updates - DCN 3.5.x updates - VPE fix - eDP panel flickering fix - Suspend fix - PSR fix - DCN 3.0+ fix - VCN 4.0.6 updates - debugfs fix amdkfd: - DMA-Buf fix - GFX 9.4.2 TLB flush fix - CP interrupt fix i915: - Fix for BUG_ON/BUILD_BUG_ON IN I915_memcpy.c - Update a MTL workaround - Fix locking inversion in hwmon's sysfs - Remove a bogus error message around PXP - Fix UAF on VMA - Reset queue_priority_hint on parking - Display Fixes: - Remove duplicated audio enable/disable on SDVO and DP - Disable AuxCCS for Xe driver - Revert init order of MIPI DSI - DRRS debugfs fix with an extra refactor patch - VRR related fixes - Fix a JSL eDP corruption - Fix the cursor physical dma address - BIOS VBT related fix nouveau: - dmem: handle kcalloc() allocation failures qxl: - remove unused variables rockchip: - vop2: remove support for AR30 and AB30 formats vmwgfx: - debugfs: create ttm_resource_manager entry only if needed -----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmYHI4sACgkQDHTzWXnE hr7Olg/7ByVXx3TLGSZESdEJhWmaUWFHrju69FSzJh49laKUQ1qf3FxHxj0fzIlB JCBuqz9vSrWY6brxhLYetnGMNQ1N6dVJNswqzT/ocDPl2C1N8KGmLIHqnyMacaKZ 1pqeek3ZwvrU/eGLzSb6QY72V8mLIcO+JxB4w66ciHN3deI4noFk4X0vuEoIvmyy MC0/Nu+pqNMhAZD/WwQJdPFRYugTlYSvp3KRD4OjzBvIJdPr8PO1T2vh/SjAHGV5 CVmkWEJDmIK4ficA4SdylJdXKlGi/BN+kyMMG70slVkZqRlFnR3agBA4j+3Kea3H vvJsPs5aS1Y1qXgYSTs5mUKRlUsDTVFHQnXjkdIZ/U9Hey5dMKyTm3Z7emMihg9F rFKuSPvikgCbLBHGZHPgjGLzPEPOJPcAXUD4GLzoiMRomGUILvV7EfPZpgXN39dW 3kS398m8H/HJHQJg9BMSyEleKFFXiT/088EbXMwaktxRQsOIoWroh4Q3nZosVd8i pcSl4m0pmeviLx6O1YPA4yp6VQ9osO4AVo4DwyQCw04WL2YxLqzFg2Rk4d3Vd+9B tyvaAog5dzVmoW6uyJr/dueLTDraK/KgoHcvtHRbWqBL/XR9KJX9PaktIpAUN/tM AEIllxRO3JXaM+4w6Z6JFwKBidRT3HBXNIZ/ySQJpuBSvdAFx0U= =vT1C -----END PGP SIGNATURE----- Merge tag 'drm-fixes-2024-03-30' of https://gitlab.freedesktop.org/drm/kernel Pull drm fixes from Dave Airlie: "Regular fixes for rc2, quite a few i915/amdgpu as usual, some xe, and then mostly scattered around. rc3 might be quieter with the holidays but we shall see. 
bridge: - select DRM_KMS_HELPER dma-buf: - fix NULL-pointer deref dp: - fix div-by-zero in DP MST unplug code fbdev: - select FB_IOMEM_FOPS for SBus sched: - fix NULL-pointer deref xe: - Fix build on mips - Fix wrong bound checks - Fix use of msec rather than jiffies - Remove dead code amdgpu: - SMU 14.0.1 updates - DCN 3.5.x updates - VPE fix - eDP panel flickering fix - Suspend fix - PSR fix - DCN 3.0+ fix - VCN 4.0.6 updates - debugfs fix amdkfd: - DMA-Buf fix - GFX 9.4.2 TLB flush fix - CP interrupt fix i915: - Fix for BUG_ON/BUILD_BUG_ON IN I915_memcpy.c - Update a MTL workaround - Fix locking inversion in hwmon's sysfs - Remove a bogus error message around PXP - Fix UAF on VMA - Reset queue_priority_hint on parking - Display Fixes: - Remove duplicated audio enable/disable on SDVO and DP - Disable AuxCCS for Xe driver - Revert init order of MIPI DSI - DRRS debugfs fix with an extra refactor patch - VRR related fixes - Fix a JSL eDP corruption - Fix the cursor physical dma address - BIOS VBT related fix nouveau: - dmem: handle kcalloc() allocation failures qxl: - remove unused variables rockchip: - vop2: remove support for AR30 and AB30 formats vmwgfx: - debugfs: create ttm_resource_manager entry only if needed" * tag 'drm-fixes-2024-03-30' of https://gitlab.freedesktop.org/drm/kernel: (55 commits) drm/i915/bios: Tolerate devdata==NULL in intel_bios_encoder_supports_dp_dual_mode() drm/i915: Pre-populate the cursor physical dma address drm/i915/gt: Reset queue_priority_hint on parking drm/i915/vma: Fix UAF on destroy against retire race drm/i915: Do not print 'pxp init failed with 0' when it succeed drm/i915: Do not match JSL in ehl_combo_pll_div_frac_wa_needed() drm/i915/hwmon: Fix locking inversion in sysfs getter drm/i915/dsb: Fix DSB vblank waits when using VRR drm/i915/vrr: Generate VRR "safe window" for DSB drm/i915/display/debugfs: Fix duplicate checks in i915_drrs_status drm/i915/drrs: Refactor CPU transcoder DRRS check drm/i915/mtl: Update workaround 14018575942 drm/i915/dsi: Go back to the previous INIT_OTP/DISPLAY_ON order, mostly drm/i915/display: Disable AuxCCS framebuffers if built for Xe drm/i915: Stop doing double audio enable/disable on SDVO and g4x+ DP drm/i915: Add includes for BUG_ON/BUILD_BUG_ON in i915_memcpy.c drm/qxl: remove unused variable from `qxl_process_single_command()` drm/qxl: remove unused `count` variable from `qxl_surface_id_alloc()` drm/i915: add bug.h include to i915_memcpy.c drm/vmwgfx: Create debugfs ttm_resource_manager entry only if needed ...
This commit is contained in:
commit
486291a0e6
@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
|
||||
return -ENOMEM;
|
||||
|
||||
chain = mock_chain(NULL, f, 1);
|
||||
if (!chain)
|
||||
if (chain)
|
||||
dma_fence_enable_sw_signaling(chain);
|
||||
else
|
||||
err = -ENOMEM;
|
||||
|
||||
dma_fence_enable_sw_signaling(chain);
|
||||
|
||||
dma_fence_signal(f);
|
||||
dma_fence_put(f);
|
||||
|
||||
|
@ -4539,6 +4539,8 @@ int amdgpu_device_prepare(struct drm_device *dev)
|
||||
if (r)
|
||||
goto unprepare;
|
||||
|
||||
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
|
||||
|
||||
for (i = 0; i < adev->num_ip_blocks; i++) {
|
||||
if (!adev->ip_blocks[i].status.valid)
|
||||
continue;
|
||||
|
@ -2237,6 +2237,7 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
|
||||
{
|
||||
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
|
||||
case IP_VERSION(4, 0, 5):
|
||||
case IP_VERSION(4, 0, 6):
|
||||
if (amdgpu_umsch_mm & 0x1) {
|
||||
amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
|
||||
adev->enable_umsch_mm = true;
|
||||
|
@ -524,46 +524,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
|
||||
{
|
||||
struct amdgpu_ring *ring = file_inode(f)->i_private;
|
||||
volatile u32 *mqd;
|
||||
int r;
|
||||
u32 *kbuf;
|
||||
int r, i;
|
||||
uint32_t value, result;
|
||||
|
||||
if (*pos & 3 || size & 3)
|
||||
return -EINVAL;
|
||||
|
||||
result = 0;
|
||||
kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
|
||||
if (!kbuf)
|
||||
return -ENOMEM;
|
||||
|
||||
r = amdgpu_bo_reserve(ring->mqd_obj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
goto err_free;
|
||||
|
||||
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(ring->mqd_obj);
|
||||
return r;
|
||||
}
|
||||
if (r)
|
||||
goto err_unreserve;
|
||||
|
||||
/*
|
||||
* Copy to local buffer to avoid put_user(), which might fault
|
||||
* and acquire mmap_sem, under reservation_ww_class_mutex.
|
||||
*/
|
||||
for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
|
||||
kbuf[i] = mqd[i];
|
||||
|
||||
amdgpu_bo_kunmap(ring->mqd_obj);
|
||||
amdgpu_bo_unreserve(ring->mqd_obj);
|
||||
|
||||
result = 0;
|
||||
while (size) {
|
||||
if (*pos >= ring->mqd_size)
|
||||
goto done;
|
||||
break;
|
||||
|
||||
value = mqd[*pos/4];
|
||||
value = kbuf[*pos/4];
|
||||
r = put_user(value, (uint32_t *)buf);
|
||||
if (r)
|
||||
goto done;
|
||||
goto err_free;
|
||||
buf += 4;
|
||||
result += 4;
|
||||
size -= 4;
|
||||
*pos += 4;
|
||||
}
|
||||
|
||||
done:
|
||||
amdgpu_bo_kunmap(ring->mqd_obj);
|
||||
mqd = NULL;
|
||||
amdgpu_bo_unreserve(ring->mqd_obj);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
kfree(kbuf);
|
||||
return result;
|
||||
|
||||
err_unreserve:
|
||||
amdgpu_bo_unreserve(ring->mqd_obj);
|
||||
err_free:
|
||||
kfree(kbuf);
|
||||
return r;
|
||||
}
|
||||
|
||||
static const struct file_operations amdgpu_debugfs_mqd_fops = {
|
||||
|
@ -189,10 +189,13 @@ static void setup_vpe_queue(struct amdgpu_device *adev,
|
||||
mqd->rptr_val = 0;
|
||||
mqd->unmapped = 1;
|
||||
|
||||
if (adev->vpe.collaborate_mode)
|
||||
memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
|
||||
|
||||
qinfo->mqd_addr = test->mqd_data_gpu_addr;
|
||||
qinfo->csa_addr = test->ctx_data_gpu_addr +
|
||||
offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
|
||||
qinfo->doorbell_offset_0 = (adev->doorbell_index.vpe_ring + 1) << 1;
|
||||
qinfo->doorbell_offset_0 = 0;
|
||||
qinfo->doorbell_offset_1 = 0;
|
||||
}
|
||||
|
||||
@ -287,7 +290,10 @@ static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *te
|
||||
ring[5] = 0;
|
||||
|
||||
mqd->wptr_val = (6 << 2);
|
||||
// WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
|
||||
if (adev->vpe.collaborate_mode)
|
||||
(++mqd)->wptr_val = (6 << 2);
|
||||
|
||||
WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (*fence == test_pattern)
|
||||
@ -571,6 +577,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
|
||||
|
||||
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
|
||||
case IP_VERSION(4, 0, 5):
|
||||
case IP_VERSION(4, 0, 6):
|
||||
fw_name = "amdgpu/umsch_mm_4_0_0.bin";
|
||||
break;
|
||||
default:
|
||||
@ -750,6 +757,7 @@ static int umsch_mm_early_init(void *handle)
|
||||
|
||||
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
|
||||
case IP_VERSION(4, 0, 5):
|
||||
case IP_VERSION(4, 0, 6):
|
||||
umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
|
||||
break;
|
||||
default:
|
||||
|
@ -33,13 +33,6 @@ enum UMSCH_SWIP_ENGINE_TYPE {
|
||||
UMSCH_SWIP_ENGINE_TYPE_MAX
|
||||
};
|
||||
|
||||
enum UMSCH_SWIP_AFFINITY_TYPE {
|
||||
UMSCH_SWIP_AFFINITY_TYPE_ANY = 0,
|
||||
UMSCH_SWIP_AFFINITY_TYPE_VCN0 = 1,
|
||||
UMSCH_SWIP_AFFINITY_TYPE_VCN1 = 2,
|
||||
UMSCH_SWIP_AFFINITY_TYPE_MAX
|
||||
};
|
||||
|
||||
enum UMSCH_CONTEXT_PRIORITY_LEVEL {
|
||||
CONTEXT_PRIORITY_LEVEL_IDLE = 0,
|
||||
CONTEXT_PRIORITY_LEVEL_NORMAL = 1,
|
||||
@ -51,13 +44,15 @@ enum UMSCH_CONTEXT_PRIORITY_LEVEL {
|
||||
struct umsch_mm_set_resource_input {
|
||||
uint32_t vmid_mask_mm_vcn;
|
||||
uint32_t vmid_mask_mm_vpe;
|
||||
uint32_t collaboration_mask_vpe;
|
||||
uint32_t logging_vmid;
|
||||
uint32_t engine_mask;
|
||||
union {
|
||||
struct {
|
||||
uint32_t disable_reset : 1;
|
||||
uint32_t disable_umsch_mm_log : 1;
|
||||
uint32_t reserved : 30;
|
||||
uint32_t use_rs64mem_for_proc_ctx_csa : 1;
|
||||
uint32_t reserved : 29;
|
||||
};
|
||||
uint32_t uint32_all;
|
||||
};
|
||||
@ -78,15 +73,18 @@ struct umsch_mm_add_queue_input {
|
||||
uint32_t doorbell_offset_1;
|
||||
enum UMSCH_SWIP_ENGINE_TYPE engine_type;
|
||||
uint32_t affinity;
|
||||
enum UMSCH_SWIP_AFFINITY_TYPE affinity_type;
|
||||
uint64_t mqd_addr;
|
||||
uint64_t h_context;
|
||||
uint64_t h_queue;
|
||||
uint32_t vm_context_cntl;
|
||||
|
||||
uint32_t process_csa_array_index;
|
||||
uint32_t context_csa_array_index;
|
||||
|
||||
struct {
|
||||
uint32_t is_context_suspended : 1;
|
||||
uint32_t reserved : 31;
|
||||
uint32_t collaboration_mode : 1;
|
||||
uint32_t reserved : 30;
|
||||
};
|
||||
};
|
||||
|
||||
@ -94,6 +92,7 @@ struct umsch_mm_remove_queue_input {
|
||||
uint32_t doorbell_offset_0;
|
||||
uint32_t doorbell_offset_1;
|
||||
uint64_t context_csa_addr;
|
||||
uint32_t context_csa_array_index;
|
||||
};
|
||||
|
||||
struct MQD_INFO {
|
||||
@ -103,6 +102,7 @@ struct MQD_INFO {
|
||||
uint32_t wptr_val;
|
||||
uint32_t rptr_val;
|
||||
uint32_t unmapped;
|
||||
uint32_t vmid;
|
||||
};
|
||||
|
||||
struct amdgpu_umsch_mm;
|
||||
|
@ -396,6 +396,12 @@ static int vpe_hw_init(void *handle)
|
||||
struct amdgpu_vpe *vpe = &adev->vpe;
|
||||
int ret;
|
||||
|
||||
/* Power on VPE */
|
||||
ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = vpe_load_microcode(vpe);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -60,7 +60,7 @@ static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
|
||||
|
||||
umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;
|
||||
|
||||
if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
|
||||
if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
|
||||
WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
|
||||
1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
|
||||
SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
|
||||
@ -248,7 +248,7 @@ static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
|
||||
data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
|
||||
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);
|
||||
|
||||
if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
|
||||
if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
|
||||
WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
|
||||
2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
|
||||
SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
|
||||
@ -271,6 +271,8 @@ static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
|
||||
|
||||
set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
|
||||
set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
|
||||
set_hw_resources.collaboration_mask_vpe =
|
||||
adev->vpe.collaborate_mode ? 0x3 : 0x0;
|
||||
set_hw_resources.engine_mask = umsch->engine_mask;
|
||||
|
||||
set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
|
||||
@ -346,6 +348,7 @@ static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
|
||||
add_queue.h_queue = input_ptr->h_queue;
|
||||
add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
|
||||
add_queue.is_context_suspended = input_ptr->is_context_suspended;
|
||||
add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0;
|
||||
|
||||
add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
|
||||
add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;
|
||||
|
@ -1523,7 +1523,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
|
||||
|
||||
/* Find a KFD GPU device that supports the get_dmabuf_info query */
|
||||
for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
|
||||
if (dev)
|
||||
if (dev && !kfd_devcgroup_check_permission(dev))
|
||||
break;
|
||||
if (!dev)
|
||||
return -EINVAL;
|
||||
@ -1545,7 +1545,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
|
||||
if (xcp_id >= 0)
|
||||
args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
|
||||
else
|
||||
args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
|
||||
args->gpu_id = dev->id;
|
||||
args->flags = flags;
|
||||
|
||||
/* Copy metadata buffer to user mode */
|
||||
|
@ -339,7 +339,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
|
||||
break;
|
||||
}
|
||||
kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
|
||||
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
|
||||
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
|
||||
KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
|
||||
kfd_set_dbg_ev_from_interrupt(dev, pasid,
|
||||
KFD_DEBUG_DOORBELL_ID(context_id0),
|
||||
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
|
||||
|
@ -328,7 +328,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
|
||||
/* CP */
|
||||
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
|
||||
kfd_signal_event_interrupt(pasid, context_id0, 32);
|
||||
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
|
||||
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
|
||||
KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
|
||||
kfd_set_dbg_ev_from_interrupt(dev, pasid,
|
||||
KFD_CTXID0_DOORBELL_ID(context_id0),
|
||||
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
|
||||
|
@ -388,7 +388,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
|
||||
break;
|
||||
}
|
||||
kfd_signal_event_interrupt(pasid, sq_int_data, 24);
|
||||
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
|
||||
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
|
||||
KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
|
||||
kfd_set_dbg_ev_from_interrupt(dev, pasid,
|
||||
KFD_DEBUG_DOORBELL_ID(context_id0),
|
||||
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
|
||||
|
@ -1473,7 +1473,7 @@ static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
|
||||
|
||||
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
|
||||
{
|
||||
return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
|
||||
return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
|
||||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
|
||||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
|
||||
}
|
||||
|
@ -6305,9 +6305,8 @@ create_stream_for_sink(struct drm_connector *connector,
|
||||
|
||||
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
|
||||
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
|
||||
else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
|
||||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
|
||||
stream->signal == SIGNAL_TYPE_EDP) {
|
||||
|
||||
if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
|
||||
//
|
||||
// should decide stream support vsc sdp colorimetry capability
|
||||
// before building vsc info packet
|
||||
@ -6323,9 +6322,8 @@ create_stream_for_sink(struct drm_connector *connector,
|
||||
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
|
||||
tf = TRANSFER_FUNC_GAMMA_22;
|
||||
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
|
||||
aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
|
||||
|
||||
if (stream->link->psr_settings.psr_feature_enabled)
|
||||
aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
|
||||
}
|
||||
finish:
|
||||
dc_sink_release(sink);
|
||||
|
@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
|
||||
* amdgpu_dm_psr_enable() - enable psr f/w
|
||||
* @stream: stream state
|
||||
*
|
||||
* Return: true if success
|
||||
*/
|
||||
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
|
||||
void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
|
||||
{
|
||||
struct dc_link *link = stream->link;
|
||||
unsigned int vsync_rate_hz = 0;
|
||||
@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
|
||||
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
|
||||
power_opt |= psr_power_opt_z10_static_screen;
|
||||
|
||||
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
|
||||
dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
|
||||
|
||||
if (link->ctx->dc->caps.ips_support)
|
||||
dc_allow_idle_optimizations(link->ctx->dc, true);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -32,7 +32,7 @@
|
||||
#define AMDGPU_DM_PSR_ENTRY_DELAY 5
|
||||
|
||||
void amdgpu_dm_set_psr_caps(struct dc_link *link);
|
||||
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
|
||||
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
|
||||
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
|
||||
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
|
||||
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
|
||||
|
@ -73,6 +73,8 @@
|
||||
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
|
||||
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
|
||||
|
||||
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
|
||||
|
||||
#define REG(reg_name) \
|
||||
(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
|
||||
|
||||
@ -411,9 +413,12 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
|
||||
|
||||
static void init_clk_states(struct clk_mgr *clk_mgr)
|
||||
{
|
||||
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
|
||||
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
|
||||
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
|
||||
|
||||
if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
|
||||
clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
|
||||
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
|
||||
clk_mgr->clks.p_state_change_support = true;
|
||||
clk_mgr->clks.prev_p_state_change_support = true;
|
||||
@ -709,7 +714,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
|
||||
clock_table->NumFclkLevelsEnabled;
|
||||
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
|
||||
|
||||
num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
|
||||
num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
|
||||
clock_table->NumDcfClkLevelsEnabled;
|
||||
for (i = 0; i < num_dcfclk; i++) {
|
||||
int j;
|
||||
|
@ -3024,7 +3024,8 @@ static void backup_planes_and_stream_state(
|
||||
scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
|
||||
}
|
||||
scratch->stream_state = *stream;
|
||||
scratch->out_transfer_func = *stream->out_transfer_func;
|
||||
if (stream->out_transfer_func)
|
||||
scratch->out_transfer_func = *stream->out_transfer_func;
|
||||
}
|
||||
|
||||
static void restore_planes_and_stream_state(
|
||||
@ -3046,7 +3047,8 @@ static void restore_planes_and_stream_state(
|
||||
*status->plane_states[i]->blend_tf = scratch->blend_tf[i];
|
||||
}
|
||||
*stream = scratch->stream_state;
|
||||
*stream->out_transfer_func = scratch->out_transfer_func;
|
||||
if (stream->out_transfer_func)
|
||||
*stream->out_transfer_func = scratch->out_transfer_func;
|
||||
}
|
||||
|
||||
static bool update_planes_and_stream_state(struct dc *dc,
|
||||
|
@ -44,6 +44,36 @@
|
||||
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
|
||||
|
||||
|
||||
void mpc3_mpc_init(struct mpc *mpc)
|
||||
{
|
||||
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
|
||||
int opp_id;
|
||||
|
||||
mpc1_mpc_init(mpc);
|
||||
|
||||
for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
|
||||
if (REG(MUX[opp_id]))
|
||||
/* disable mpc out rate and flow control */
|
||||
REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,
|
||||
1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
|
||||
{
|
||||
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
|
||||
|
||||
mpc1_mpc_init_single_inst(mpc, mpcc_id);
|
||||
|
||||
/* assuming mpc out mux is connected to opp with the same index at this
|
||||
* point in time (e.g. transitioning from vbios to driver)
|
||||
*/
|
||||
if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))
|
||||
/* disable mpc out rate and flow control */
|
||||
REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,
|
||||
1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
|
||||
}
|
||||
|
||||
bool mpc3_is_dwb_idle(
|
||||
struct mpc *mpc,
|
||||
int dwb_id)
|
||||
@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux(
|
||||
MPC_DWB0_MUX, 0xf);
|
||||
}
|
||||
|
||||
void mpc3_set_out_rate_control(
|
||||
struct mpc *mpc,
|
||||
int opp_id,
|
||||
bool enable,
|
||||
bool rate_2x_mode,
|
||||
struct mpc_dwb_flow_control *flow_control)
|
||||
{
|
||||
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
|
||||
|
||||
REG_UPDATE_2(MUX[opp_id],
|
||||
MPC_OUT_RATE_CONTROL_DISABLE, !enable,
|
||||
MPC_OUT_RATE_CONTROL, rate_2x_mode);
|
||||
|
||||
if (flow_control)
|
||||
REG_UPDATE_2(MUX[opp_id],
|
||||
MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
|
||||
MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
|
||||
}
|
||||
|
||||
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
|
||||
{
|
||||
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
|
||||
@ -1490,8 +1501,8 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
|
||||
.read_mpcc_state = mpc3_read_mpcc_state,
|
||||
.insert_plane = mpc1_insert_plane,
|
||||
.remove_mpcc = mpc1_remove_mpcc,
|
||||
.mpc_init = mpc1_mpc_init,
|
||||
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
|
||||
.mpc_init = mpc3_mpc_init,
|
||||
.mpc_init_single_inst = mpc3_mpc_init_single_inst,
|
||||
.update_blending = mpc2_update_blending,
|
||||
.cursor_lock = mpc1_cursor_lock,
|
||||
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
|
||||
@ -1508,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
|
||||
.set_dwb_mux = mpc3_set_dwb_mux,
|
||||
.disable_dwb_mux = mpc3_disable_dwb_mux,
|
||||
.is_dwb_idle = mpc3_is_dwb_idle,
|
||||
.set_out_rate_control = mpc3_set_out_rate_control,
|
||||
.set_gamut_remap = mpc3_set_gamut_remap,
|
||||
.program_shaper = mpc3_program_shaper,
|
||||
.acquire_rmu = mpcc3_acquire_rmu,
|
||||
|
@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
|
||||
int num_mpcc,
|
||||
int num_rmu);
|
||||
|
||||
void mpc3_mpc_init(
|
||||
struct mpc *mpc);
|
||||
|
||||
void mpc3_mpc_init_single_inst(
|
||||
struct mpc *mpc,
|
||||
unsigned int mpcc_id);
|
||||
|
||||
bool mpc3_program_shaper(
|
||||
struct mpc *mpc,
|
||||
const struct pwl_params *params,
|
||||
@ -1078,13 +1085,6 @@ bool mpc3_is_dwb_idle(
|
||||
struct mpc *mpc,
|
||||
int dwb_id);
|
||||
|
||||
void mpc3_set_out_rate_control(
|
||||
struct mpc *mpc,
|
||||
int opp_id,
|
||||
bool enable,
|
||||
bool rate_2x_mode,
|
||||
struct mpc_dwb_flow_control *flow_control);
|
||||
|
||||
void mpc3_power_on_ogam_lut(
|
||||
struct mpc *mpc, int mpcc_id,
|
||||
bool power_on);
|
||||
|
@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc)
|
||||
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
|
||||
int mpcc_id;
|
||||
|
||||
mpc1_mpc_init(mpc);
|
||||
mpc3_mpc_init(mpc);
|
||||
|
||||
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
|
||||
if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
|
||||
@ -991,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
|
||||
.insert_plane = mpc1_insert_plane,
|
||||
.remove_mpcc = mpc1_remove_mpcc,
|
||||
.mpc_init = mpc32_mpc_init,
|
||||
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
|
||||
.mpc_init_single_inst = mpc3_mpc_init_single_inst,
|
||||
.update_blending = mpc2_update_blending,
|
||||
.cursor_lock = mpc1_cursor_lock,
|
||||
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
|
||||
@ -1008,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
|
||||
.set_dwb_mux = mpc3_set_dwb_mux,
|
||||
.disable_dwb_mux = mpc3_disable_dwb_mux,
|
||||
.is_dwb_idle = mpc3_is_dwb_idle,
|
||||
.set_out_rate_control = mpc3_set_out_rate_control,
|
||||
.set_gamut_remap = mpc3_set_gamut_remap,
|
||||
.program_shaper = mpc32_program_shaper,
|
||||
.program_3dlut = mpc32_program_3dlut,
|
||||
|
@ -166,8 +166,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
|
||||
.num_states = 5,
|
||||
.sr_exit_time_us = 28.0,
|
||||
.sr_enter_plus_exit_time_us = 30.0,
|
||||
.sr_exit_z8_time_us = 210.0,
|
||||
.sr_enter_plus_exit_z8_time_us = 320.0,
|
||||
.sr_exit_z8_time_us = 250.0,
|
||||
.sr_enter_plus_exit_z8_time_us = 350.0,
|
||||
.fclk_change_latency_us = 24.0,
|
||||
.usr_retraining_latency_us = 2,
|
||||
.writeback_latency_us = 12.0,
|
||||
|
@ -98,55 +98,114 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
|
||||
.clock_limits = {
|
||||
{
|
||||
.state = 0,
|
||||
.dispclk_mhz = 1200.0,
|
||||
.dppclk_mhz = 1200.0,
|
||||
.dcfclk_mhz = 400.0,
|
||||
.fabricclk_mhz = 400.0,
|
||||
.socclk_mhz = 600.0,
|
||||
.dram_speed_mts = 3200.0,
|
||||
.dispclk_mhz = 600.0,
|
||||
.dppclk_mhz = 600.0,
|
||||
.phyclk_mhz = 600.0,
|
||||
.phyclk_d18_mhz = 667.0,
|
||||
.dscclk_mhz = 186.0,
|
||||
.dscclk_mhz = 200.0,
|
||||
.dtbclk_mhz = 600.0,
|
||||
},
|
||||
{
|
||||
.state = 1,
|
||||
.dispclk_mhz = 1200.0,
|
||||
.dppclk_mhz = 1200.0,
|
||||
.dcfclk_mhz = 600.0,
|
||||
.fabricclk_mhz = 1000.0,
|
||||
.socclk_mhz = 733.0,
|
||||
.dram_speed_mts = 6400.0,
|
||||
.dispclk_mhz = 800.0,
|
||||
.dppclk_mhz = 800.0,
|
||||
.phyclk_mhz = 810.0,
|
||||
.phyclk_d18_mhz = 667.0,
|
||||
.dscclk_mhz = 209.0,
|
||||
.dscclk_mhz = 266.7,
|
||||
.dtbclk_mhz = 600.0,
|
||||
},
|
||||
{
|
||||
.state = 2,
|
||||
.dispclk_mhz = 1200.0,
|
||||
.dppclk_mhz = 1200.0,
|
||||
.dcfclk_mhz = 738.0,
|
||||
.fabricclk_mhz = 1200.0,
|
||||
.socclk_mhz = 880.0,
|
||||
.dram_speed_mts = 7500.0,
|
||||
.dispclk_mhz = 800.0,
|
||||
.dppclk_mhz = 800.0,
|
||||
.phyclk_mhz = 810.0,
|
||||
.phyclk_d18_mhz = 667.0,
|
||||
.dscclk_mhz = 209.0,
|
||||
.dscclk_mhz = 266.7,
|
||||
.dtbclk_mhz = 600.0,
|
||||
},
|
||||
{
|
||||
.state = 3,
|
||||
.dispclk_mhz = 1200.0,
|
||||
.dppclk_mhz = 1200.0,
|
||||
.dcfclk_mhz = 800.0,
|
||||
.fabricclk_mhz = 1400.0,
|
||||
.socclk_mhz = 978.0,
|
||||
.dram_speed_mts = 7500.0,
|
||||
.dispclk_mhz = 960.0,
|
||||
.dppclk_mhz = 960.0,
|
||||
.phyclk_mhz = 810.0,
|
||||
.phyclk_d18_mhz = 667.0,
|
||||
.dscclk_mhz = 371.0,
|
||||
.dscclk_mhz = 320.0,
|
||||
.dtbclk_mhz = 600.0,
|
||||
},
|
||||
{
|
||||
.state = 4,
|
||||
.dcfclk_mhz = 873.0,
|
||||
.fabricclk_mhz = 1600.0,
|
||||
.socclk_mhz = 1100.0,
|
||||
.dram_speed_mts = 8533.0,
|
||||
.dispclk_mhz = 1066.7,
|
||||
.dppclk_mhz = 1066.7,
|
||||
.phyclk_mhz = 810.0,
|
||||
.phyclk_d18_mhz = 667.0,
|
||||
.dscclk_mhz = 355.6,
|
||||
.dtbclk_mhz = 600.0,
|
||||
},
|
||||
{
|
||||
.state = 5,
|
||||
.dcfclk_mhz = 960.0,
|
||||
.fabricclk_mhz = 1700.0,
|
||||
.socclk_mhz = 1257.0,
|
||||
.dram_speed_mts = 8533.0,
|
||||
.dispclk_mhz = 1200.0,
|
||||
.dppclk_mhz = 1200.0,
|
||||
.phyclk_mhz = 810.0,
|
||||
.phyclk_d18_mhz = 667.0,
|
||||
.dscclk_mhz = 417.0,
|
||||
.dscclk_mhz = 400.0,
|
||||
.dtbclk_mhz = 600.0,
|
||||
},
|
||||
{
|
||||
.state = 6,
|
||||
.dcfclk_mhz = 1067.0,
|
||||
.fabricclk_mhz = 1850.0,
|
||||
.socclk_mhz = 1257.0,
|
||||
.dram_speed_mts = 8533.0,
|
||||
.dispclk_mhz = 1371.4,
|
||||
.dppclk_mhz = 1371.4,
|
||||
.phyclk_mhz = 810.0,
|
||||
.phyclk_d18_mhz = 667.0,
|
||||
.dscclk_mhz = 457.1,
|
||||
.dtbclk_mhz = 600.0,
|
||||
},
|
||||
{
|
||||
.state = 7,
|
||||
.dcfclk_mhz = 1200.0,
|
||||
.fabricclk_mhz = 2000.0,
|
||||
.socclk_mhz = 1467.0,
|
||||
.dram_speed_mts = 8533.0,
|
||||
.dispclk_mhz = 1600.0,
|
||||
.dppclk_mhz = 1600.0,
|
||||
.phyclk_mhz = 810.0,
|
||||
.phyclk_d18_mhz = 667.0,
|
||||
.dscclk_mhz = 533.3,
|
||||
.dtbclk_mhz = 600.0,
|
||||
},
|
||||
},
|
||||
.num_states = 5,
|
||||
.num_states = 8,
|
||||
.sr_exit_time_us = 28.0,
|
||||
.sr_enter_plus_exit_time_us = 30.0,
|
||||
.sr_exit_z8_time_us = 210.0,
|
||||
.sr_enter_plus_exit_z8_time_us = 320.0,
|
||||
.sr_exit_z8_time_us = 250.0,
|
||||
.sr_enter_plus_exit_z8_time_us = 350.0,
|
||||
.fclk_change_latency_us = 24.0,
|
||||
.usr_retraining_latency_us = 2,
|
||||
.writeback_latency_us = 12.0,
|
||||
@ -177,6 +236,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
|
||||
.do_urgent_latency_adjustment = 0,
|
||||
.urgent_latency_adjustment_fabric_clock_component_us = 0,
|
||||
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
|
||||
.num_chans = 4,
|
||||
.dram_clock_change_latency_us = 11.72,
|
||||
.dispclk_dppclk_vco_speed_mhz = 2400.0,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -340,6 +402,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
|
||||
clock_limits[i].socclk_mhz;
|
||||
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
|
||||
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
|
||||
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
|
||||
clock_limits[i].dtbclk_mhz;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
|
||||
@ -352,6 +416,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
|
||||
clk_table->num_entries;
|
||||
}
|
||||
}
|
||||
|
||||
@ -551,6 +617,7 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
|
||||
if (context->res_ctx.pipe_ctx[i].plane_state)
|
||||
plane_count++;
|
||||
}
|
||||
|
||||
/*dcn351 does not support z9/z10*/
|
||||
if (context->stream_count == 0 || plane_count == 0) {
|
||||
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
|
||||
@ -564,11 +631,9 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
|
||||
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
|
||||
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
|
||||
|
||||
|
||||
/*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/
|
||||
if (is_pwrseq0 && (is_psr || is_replay))
|
||||
if (is_pwrseq0 && (is_psr || is_replay))
|
||||
support = allow_z8 ? allow_z8 : DCN_ZSTATE_SUPPORT_DISALLOW;
|
||||
|
||||
}
|
||||
context->bw_ctx.bw.dcn.clk.zstate_support = support;
|
||||
}
|
||||
|
@ -228,17 +228,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
|
||||
break;
|
||||
|
||||
case dml_project_dcn35:
|
||||
case dml_project_dcn351:
|
||||
out->num_chans = 4;
|
||||
out->round_trip_ping_latency_dcfclk_cycles = 106;
|
||||
out->smn_latency_us = 2;
|
||||
out->dispclk_dppclk_vco_speed_mhz = 3600;
|
||||
break;
|
||||
|
||||
case dml_project_dcn351:
|
||||
out->num_chans = 16;
|
||||
out->round_trip_ping_latency_dcfclk_cycles = 1100;
|
||||
out->smn_latency_us = 2;
|
||||
break;
|
||||
}
|
||||
/* ---Overrides if available--- */
|
||||
if (dml2->config.bbox_overrides.dram_num_chan)
|
||||
|
@ -1185,7 +1185,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
|
||||
if (dccg) {
|
||||
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
|
||||
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
|
||||
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
|
||||
if (dccg && dccg->funcs->set_dtbclk_dto)
|
||||
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
|
||||
}
|
||||
} else if (dccg && dccg->funcs->disable_symclk_se) {
|
||||
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
|
||||
|
@ -69,29 +69,6 @@
|
||||
#define FN(reg_name, field_name) \
|
||||
hws->shifts->field_name, hws->masks->field_name
|
||||
|
||||
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
|
||||
int opp_cnt)
|
||||
{
|
||||
bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
|
||||
int flow_ctrl_cnt;
|
||||
|
||||
if (opp_cnt >= 2)
|
||||
hblank_halved = true;
|
||||
|
||||
flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
|
||||
stream->timing.h_border_left -
|
||||
stream->timing.h_border_right;
|
||||
|
||||
if (hblank_halved)
|
||||
flow_ctrl_cnt /= 2;
|
||||
|
||||
/* ODM combine 4:1 case */
|
||||
if (opp_cnt == 4)
|
||||
flow_ctrl_cnt /= 2;
|
||||
|
||||
return flow_ctrl_cnt;
|
||||
}
|
||||
|
||||
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
|
||||
{
|
||||
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
|
||||
@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
|
||||
struct pipe_ctx *odm_pipe;
|
||||
int opp_cnt = 0;
|
||||
int opp_inst[MAX_PIPES] = {0};
|
||||
bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
|
||||
struct mpc_dwb_flow_control flow_control;
|
||||
struct mpc *mpc = dc->res_pool->mpc;
|
||||
int i;
|
||||
|
||||
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
|
||||
|
||||
@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
|
||||
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
|
||||
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
|
||||
|
||||
rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
|
||||
flow_control.flow_ctrl_mode = 0;
|
||||
flow_control.flow_ctrl_cnt0 = 0x80;
|
||||
flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
|
||||
if (mpc->funcs->set_out_rate_control) {
|
||||
for (i = 0; i < opp_cnt; ++i) {
|
||||
mpc->funcs->set_out_rate_control(
|
||||
mpc, opp_inst[i],
|
||||
true,
|
||||
rate_control_2x_pclk,
|
||||
&flow_control);
|
||||
}
|
||||
}
|
||||
|
||||
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
|
||||
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
|
||||
odm_pipe->stream_res.opp,
|
||||
|
@ -966,29 +966,6 @@ void dcn32_init_hw(struct dc *dc)
|
||||
}
|
||||
}
|
||||
|
||||
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
|
||||
int opp_cnt)
|
||||
{
|
||||
bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
|
||||
int flow_ctrl_cnt;
|
||||
|
||||
if (opp_cnt >= 2)
|
||||
hblank_halved = true;
|
||||
|
||||
flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
|
||||
stream->timing.h_border_left -
|
||||
stream->timing.h_border_right;
|
||||
|
||||
if (hblank_halved)
|
||||
flow_ctrl_cnt /= 2;
|
||||
|
||||
/* ODM combine 4:1 case */
|
||||
if (opp_cnt == 4)
|
||||
flow_ctrl_cnt /= 2;
|
||||
|
||||
return flow_ctrl_cnt;
|
||||
}
|
||||
|
||||
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
|
||||
{
|
||||
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
|
||||
@ -1103,10 +1080,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
|
||||
struct pipe_ctx *odm_pipe;
|
||||
int opp_cnt = 0;
|
||||
int opp_inst[MAX_PIPES] = {0};
|
||||
bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
|
||||
struct mpc_dwb_flow_control flow_control;
|
||||
struct mpc *mpc = dc->res_pool->mpc;
|
||||
int i;
|
||||
|
||||
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
|
||||
|
||||
@ -1119,20 +1092,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
|
||||
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
|
||||
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
|
||||
|
||||
rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
|
||||
flow_control.flow_ctrl_mode = 0;
|
||||
flow_control.flow_ctrl_cnt0 = 0x80;
|
||||
flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
|
||||
if (mpc->funcs->set_out_rate_control) {
|
||||
for (i = 0; i < opp_cnt; ++i) {
|
||||
mpc->funcs->set_out_rate_control(
|
||||
mpc, opp_inst[i],
|
||||
true,
|
||||
rate_control_2x_pclk,
|
||||
&flow_control);
|
||||
}
|
||||
}
|
||||
|
||||
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
|
||||
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
|
||||
odm_pipe->stream_res.opp,
|
||||
|
@ -358,29 +358,6 @@ void dcn35_init_hw(struct dc *dc)
|
||||
}
|
||||
}
|
||||
|
||||
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
|
||||
int opp_cnt)
|
||||
{
|
||||
bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
|
||||
int flow_ctrl_cnt;
|
||||
|
||||
if (opp_cnt >= 2)
|
||||
hblank_halved = true;
|
||||
|
||||
flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
|
||||
stream->timing.h_border_left -
|
||||
stream->timing.h_border_right;
|
||||
|
||||
if (hblank_halved)
|
||||
flow_ctrl_cnt /= 2;
|
||||
|
||||
/* ODM combine 4:1 case */
|
||||
if (opp_cnt == 4)
|
||||
flow_ctrl_cnt /= 2;
|
||||
|
||||
return flow_ctrl_cnt;
|
||||
}
|
||||
|
||||
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
|
||||
{
|
||||
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
|
||||
@ -474,10 +451,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
|
||||
struct pipe_ctx *odm_pipe;
|
||||
int opp_cnt = 0;
|
||||
int opp_inst[MAX_PIPES] = {0};
|
||||
bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
|
||||
struct mpc_dwb_flow_control flow_control;
|
||||
struct mpc *mpc = dc->res_pool->mpc;
|
||||
int i;
|
||||
|
||||
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
|
||||
|
||||
@ -490,20 +463,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
|
||||
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
|
||||
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
|
||||
|
||||
rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
|
||||
flow_control.flow_ctrl_mode = 0;
|
||||
flow_control.flow_ctrl_cnt0 = 0x80;
|
||||
flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
|
||||
if (mpc->funcs->set_out_rate_control) {
|
||||
for (i = 0; i < opp_cnt; ++i) {
|
||||
mpc->funcs->set_out_rate_control(
|
||||
mpc, opp_inst[i],
|
||||
true,
|
||||
rate_control_2x_pclk,
|
||||
&flow_control);
|
||||
}
|
||||
}
|
||||
|
||||
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
|
||||
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
|
||||
odm_pipe->stream_res.opp,
|
||||
|
@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
|
||||
.prepare_bandwidth = dcn35_prepare_bandwidth,
|
||||
.optimize_bandwidth = dcn35_optimize_bandwidth,
|
||||
.update_bandwidth = dcn20_update_bandwidth,
|
||||
.set_drr = dcn10_set_drr,
|
||||
.set_drr = dcn35_set_drr,
|
||||
.get_position = dcn10_get_position,
|
||||
.set_static_screen_control = dcn35_set_static_screen_control,
|
||||
.setup_stereo = dcn10_setup_stereo,
|
||||
|
@ -700,6 +700,8 @@ static const struct dc_debug_options debug_defaults_drv = {
|
||||
.disable_dcc = DCC_ENABLE,
|
||||
.disable_dpp_power_gate = true,
|
||||
.disable_hubp_power_gate = true,
|
||||
.disable_optc_power_gate = true, /*should the same as above two*/
|
||||
.disable_hpo_power_gate = true, /*dmubfw force domain25 on*/
|
||||
.disable_clock_gate = false,
|
||||
.disable_dsc_power_gate = true,
|
||||
.vsr_support = true,
|
||||
@ -742,12 +744,13 @@ static const struct dc_debug_options debug_defaults_drv = {
|
||||
},
|
||||
.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
|
||||
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
|
||||
.minimum_z8_residency_time = 2100,
|
||||
.using_dml2 = true,
|
||||
.support_eDP1_5 = true,
|
||||
.enable_hpo_pg_support = false,
|
||||
.enable_legacy_fast_update = true,
|
||||
.enable_single_display_2to1_odm_policy = true,
|
||||
.disable_idle_power_optimizations = true,
|
||||
.disable_idle_power_optimizations = false,
|
||||
.dmcub_emulation = false,
|
||||
.disable_boot_optimizations = false,
|
||||
.disable_unbounded_requesting = false,
|
||||
@ -758,8 +761,10 @@ static const struct dc_debug_options debug_defaults_drv = {
|
||||
.disable_z10 = true,
|
||||
.ignore_pg = true,
|
||||
.psp_disabled_wa = true,
|
||||
.ips2_eval_delay_us = 200,
|
||||
.ips2_entry_delay_us = 400
|
||||
.ips2_eval_delay_us = 2000,
|
||||
.ips2_entry_delay_us = 800,
|
||||
.disable_dmub_reallow_idle = true,
|
||||
.static_screen_wait_frames = 2,
|
||||
};
|
||||
|
||||
static const struct dc_panel_config panel_config_defaults = {
|
||||
|
@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
|
||||
}
|
||||
|
||||
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
|
||||
if (stream->link->psr_settings.psr_feature_enabled) {
|
||||
if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
|
||||
vsc_packet_revision = vsc_packet_rev4;
|
||||
else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
|
||||
vsc_packet_revision = vsc_packet_rev2;
|
||||
}
|
||||
|
||||
if (stream->link->replay_settings.config.replay_supported)
|
||||
if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
|
||||
vsc_packet_revision = vsc_packet_rev4;
|
||||
else if (stream->link->replay_settings.config.replay_supported)
|
||||
vsc_packet_revision = vsc_packet_rev4;
|
||||
else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
|
||||
vsc_packet_revision = vsc_packet_rev2;
|
||||
|
||||
/* Update to revision 5 for extended colorimetry support */
|
||||
if (stream->use_vsc_sdp_for_colorimetry)
|
||||
|
@ -234,7 +234,8 @@ union UMSCHAPI__SET_HW_RESOURCES {
|
||||
uint32_t enable_level_process_quantum_check : 1;
|
||||
uint32_t is_vcn0_enabled : 1;
|
||||
uint32_t is_vcn1_enabled : 1;
|
||||
uint32_t reserved : 27;
|
||||
uint32_t use_rs64mem_for_proc_ctx_csa : 1;
|
||||
uint32_t reserved : 26;
|
||||
};
|
||||
uint32_t uint32_all;
|
||||
};
|
||||
@ -297,9 +298,12 @@ union UMSCHAPI__ADD_QUEUE {
|
||||
|
||||
struct {
|
||||
uint32_t is_context_suspended : 1;
|
||||
uint32_t reserved : 31;
|
||||
uint32_t collaboration_mode : 1;
|
||||
uint32_t reserved : 30;
|
||||
};
|
||||
struct UMSCH_API_STATUS api_status;
|
||||
uint32_t process_csa_array_index;
|
||||
uint32_t context_csa_array_index;
|
||||
};
|
||||
|
||||
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
|
||||
@ -314,6 +318,7 @@ union UMSCHAPI__REMOVE_QUEUE {
|
||||
uint64_t context_csa_addr;
|
||||
|
||||
struct UMSCH_API_STATUS api_status;
|
||||
uint32_t context_csa_array_index;
|
||||
};
|
||||
|
||||
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
|
||||
@ -337,6 +342,7 @@ union UMSCHAPI__SUSPEND {
|
||||
uint32_t suspend_fence_value;
|
||||
|
||||
struct UMSCH_API_STATUS api_status;
|
||||
uint32_t context_csa_array_index;
|
||||
};
|
||||
|
||||
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
|
||||
@ -356,6 +362,7 @@ union UMSCHAPI__RESUME {
|
||||
enum UMSCH_ENGINE_TYPE engine_type;
|
||||
|
||||
struct UMSCH_API_STATUS api_status;
|
||||
uint32_t context_csa_array_index;
|
||||
};
|
||||
|
||||
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
|
||||
@ -404,6 +411,7 @@ union UMSCHAPI__UPDATE_AFFINITY {
|
||||
union UMSCH_AFFINITY affinity;
|
||||
uint64_t context_csa_addr;
|
||||
struct UMSCH_API_STATUS api_status;
|
||||
uint32_t context_csa_array_index;
|
||||
};
|
||||
|
||||
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
|
||||
@ -417,6 +425,7 @@ union UMSCHAPI__CHANGE_CONTEXT_PRIORITY_LEVEL {
|
||||
uint64_t context_quantum;
|
||||
uint64_t context_csa_addr;
|
||||
struct UMSCH_API_STATUS api_status;
|
||||
uint32_t context_csa_array_index;
|
||||
};
|
||||
|
||||
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
|
||||
|
@ -54,14 +54,14 @@
|
||||
#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
|
||||
#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
|
||||
#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
|
||||
#define PPSMC_MSG_SPARE0 0x04 ///< SPARE
|
||||
#define PPSMC_MSG_SPARE1 0x05 ///< SPARE
|
||||
#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN
|
||||
#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
|
||||
#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
|
||||
#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN1
|
||||
#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default
|
||||
#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN0
|
||||
#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default
|
||||
#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display
|
||||
#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
|
||||
#define PPSMC_MSG_SPARE2 0x0A ///< SPARE
|
||||
#define PPSMC_MSG_SPARE3 0x0B ///< SPARE
|
||||
#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display
|
||||
#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)
|
||||
#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
|
||||
#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
|
||||
#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
|
||||
@ -71,7 +71,7 @@
|
||||
#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
|
||||
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
|
||||
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
|
||||
#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
|
||||
#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
|
||||
|
||||
#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
|
||||
|
||||
@ -84,17 +84,17 @@
|
||||
|
||||
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
|
||||
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
|
||||
#define PPSMC_MSG_SetSoftMaxVcn 0x1F ///< Set soft max for VCN clocks (VCLK and DCLK)
|
||||
#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
|
||||
#define PPSMC_MSG_spare_0x20 0x20
|
||||
#define PPSMC_MSG_PowerDownJpeg 0x21 ///< Power down Jpeg
|
||||
#define PPSMC_MSG_PowerUpJpeg 0x22 ///< Power up Jpeg; VCN is power gated by default
|
||||
#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
|
||||
#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
|
||||
|
||||
#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
|
||||
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
|
||||
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
|
||||
#define PPSMC_MSG_Reserved 0x26 ///< Not used
|
||||
#define PPSMC_MSG_Reserved1 0x27 ///< Not used, previously PPSMC_MSG_RequestActiveWgp
|
||||
#define PPSMC_MSG_Reserved2 0x28 ///< Not used, previously PPSMC_MSG_QueryActiveWgp
|
||||
#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN1
|
||||
#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default
|
||||
#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)
|
||||
#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default
|
||||
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
|
||||
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
|
||||
|
@ -115,6 +115,10 @@
|
||||
__SMU_DUMMY_MAP(PowerDownVcn), \
|
||||
__SMU_DUMMY_MAP(PowerUpJpeg), \
|
||||
__SMU_DUMMY_MAP(PowerDownJpeg), \
|
||||
__SMU_DUMMY_MAP(PowerUpJpeg0), \
|
||||
__SMU_DUMMY_MAP(PowerDownJpeg0), \
|
||||
__SMU_DUMMY_MAP(PowerUpJpeg1), \
|
||||
__SMU_DUMMY_MAP(PowerDownJpeg1), \
|
||||
__SMU_DUMMY_MAP(BacoAudioD3PME), \
|
||||
__SMU_DUMMY_MAP(ArmD3), \
|
||||
__SMU_DUMMY_MAP(RunDcBtc), \
|
||||
@ -135,6 +139,8 @@
|
||||
__SMU_DUMMY_MAP(PowerUpSdma), \
|
||||
__SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \
|
||||
__SMU_DUMMY_MAP(SetHardMinVcn), \
|
||||
__SMU_DUMMY_MAP(SetHardMinVcn0), \
|
||||
__SMU_DUMMY_MAP(SetHardMinVcn1), \
|
||||
__SMU_DUMMY_MAP(SetAllowFclkSwitch), \
|
||||
__SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \
|
||||
__SMU_DUMMY_MAP(ActiveProcessNotify), \
|
||||
@ -150,6 +156,8 @@
|
||||
__SMU_DUMMY_MAP(SetPhyclkVoltageByFreq), \
|
||||
__SMU_DUMMY_MAP(SetDppclkVoltageByFreq), \
|
||||
__SMU_DUMMY_MAP(SetSoftMinVcn), \
|
||||
__SMU_DUMMY_MAP(SetSoftMinVcn0), \
|
||||
__SMU_DUMMY_MAP(SetSoftMinVcn1), \
|
||||
__SMU_DUMMY_MAP(EnablePostCode), \
|
||||
__SMU_DUMMY_MAP(GetGfxclkFrequency), \
|
||||
__SMU_DUMMY_MAP(GetFclkFrequency), \
|
||||
@ -161,6 +169,8 @@
|
||||
__SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq), \
|
||||
__SMU_DUMMY_MAP(SetSoftMaxFclkByFreq), \
|
||||
__SMU_DUMMY_MAP(SetSoftMaxVcn), \
|
||||
__SMU_DUMMY_MAP(SetSoftMaxVcn0), \
|
||||
__SMU_DUMMY_MAP(SetSoftMaxVcn1), \
|
||||
__SMU_DUMMY_MAP(PowerGateMmHub), \
|
||||
__SMU_DUMMY_MAP(UpdatePmeRestore), \
|
||||
__SMU_DUMMY_MAP(GpuChangeState), \
|
||||
|
@ -1402,9 +1402,22 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
|
||||
if (adev->vcn.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
|
||||
SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
|
||||
i << 16U, NULL);
|
||||
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
|
||||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
|
||||
if (i == 0)
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
|
||||
SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
|
||||
i << 16U, NULL);
|
||||
else if (i == 1)
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
|
||||
SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
|
||||
i << 16U, NULL);
|
||||
} else {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
|
||||
SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
|
||||
i << 16U, NULL);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@ -1415,9 +1428,34 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
|
||||
int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
|
||||
bool enable)
|
||||
{
|
||||
return smu_cmn_send_smc_msg_with_param(smu, enable ?
|
||||
SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
|
||||
0, NULL);
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
int i, ret = 0;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
|
||||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
|
||||
if (i == 0)
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
|
||||
SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
|
||||
i << 16U, NULL);
|
||||
else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
|
||||
SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
|
||||
i << 16U, NULL);
|
||||
} else {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
|
||||
SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
|
||||
i << 16U, NULL);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_v14_0_run_btc(struct smu_context *smu)
|
||||
|
@@ -70,9 +70,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
 	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
 	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
-	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
-	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
-	MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
+	MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0, 1),
+	MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0, 1),
+	MSG_MAP(SetHardMinVcn0, PPSMC_MSG_SetHardMinVcn0, 1),
+	MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1, 1),
+	MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1, 1),
+	MSG_MAP(SetHardMinVcn1, PPSMC_MSG_SetHardMinVcn1, 1),
 	MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1),
 	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
 	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
@@ -83,7 +86,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
 	MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
 	MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1),
-	MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
+	MSG_MAP(SetSoftMinVcn0, PPSMC_MSG_SetSoftMinVcn0, 1),
+	MSG_MAP(SetSoftMinVcn1, PPSMC_MSG_SetSoftMinVcn1, 1),
 	MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1),
 	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
 	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
@@ -91,9 +95,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
 	MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
 	MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
-	MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
-	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
-	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+	MSG_MAP(SetSoftMaxVcn0, PPSMC_MSG_SetSoftMaxVcn0, 1),
+	MSG_MAP(SetSoftMaxVcn1, PPSMC_MSG_SetSoftMaxVcn1, 1),
+	MSG_MAP(PowerDownJpeg0, PPSMC_MSG_PowerDownJpeg0, 1),
+	MSG_MAP(PowerUpJpeg0, PPSMC_MSG_PowerUpJpeg0, 1),
+	MSG_MAP(PowerDownJpeg1, PPSMC_MSG_PowerDownJpeg1, 1),
+	MSG_MAP(PowerUpJpeg1, PPSMC_MSG_PowerUpJpeg1, 1),
 	MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
 	MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
 	MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
@@ -4111,6 +4111,13 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
 	u32 overhead = 1000000;
 	int symbol_cycles;
 
+	if (lane_count == 0 || hactive == 0 || bpp_x16 == 0) {
+		DRM_DEBUG_KMS("Invalid BW overhead params: lane_count %d, hactive %d, bpp_x16 %d.%04d\n",
+			      lane_count, hactive,
+			      bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);
+		return 0;
+	}
+
 	/*
 	 * DP Standard v2.1 2.6.4.1
 	 * SSC downspread and ref clock variation margin:
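The debug format above is worth decoding: bpp_x16 stores bits-per-pixel in 1/16th units, so the integer part is bpp_x16 >> 4 and the low nibble scales by 625 because 1/16 = 0.0625 (i.e. 625 per four decimal digits). A self-contained check of that arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int bpp_x16 = 0x1a8;	/* 26.5 bpp in 1/16th units: 26*16 + 8 */

	/* 8/16 = 0.5, and 8 * 625 = 5000, so this prints "26.5000 bpp" */
	printf("%d.%04d bpp\n", bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);
	assert((bpp_x16 & 0xf) * 625 == 5000);
	return 0;
}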
@@ -717,7 +717,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
 {
 	intel_enable_dp(state, encoder, pipe_config, conn_state);
 	intel_edp_backlight_on(pipe_config, conn_state);
-	encoder->audio_enable(encoder, pipe_config, conn_state);
 }
 
 static void vlv_enable_dp(struct intel_atomic_state *state,
@@ -726,7 +725,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
 			  const struct drm_connector_state *conn_state)
 {
 	intel_edp_backlight_on(pipe_config, conn_state);
-	encoder->audio_enable(encoder, pipe_config, conn_state);
 }
 
 static void g4x_pre_enable_dp(struct intel_atomic_state *state,
@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
 	}
 
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
-	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 
 	/* ensure all panel commands dispatched before enabling transcoder */
 	wait_for_cmds_dispatched_to_panel(encoder);
@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
 	/* step6d: enable dsi transcoder */
 	gen11_dsi_enable_transcoder(encoder);
 
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
 	/* step7: enable backlight */
 	intel_backlight_enable(crtc_state, conn_state);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
@@ -1955,16 +1955,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
  * these devices we split the init OTP sequence into a deassert sequence and
  * the actual init OTP part.
  */
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
-				 struct intel_panel *panel)
+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+				     struct intel_panel *panel)
 {
 	u8 *init_otp;
 	int len;
 
-	/* Limit this to VLV for now. */
-	if (!IS_VALLEYVIEW(i915))
-		return;
-
 	/* Limit this to v1 vid-mode sequences */
 	if (panel->vbt.dsi.config->is_cmd_mode ||
 	    panel->vbt.dsi.seq_version != 1)
@@ -2000,6 +1996,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
 	panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
 }
 
+/*
+ * Some machines (eg. Lenovo 82TQ) appear to have broken
+ * VBT sequences:
+ * - INIT_OTP is not present at all
+ * - what should be in INIT_OTP is in DISPLAY_ON
+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
+ *   (along with the actual backlight stuff)
+ *
+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
+ *
+ * TODO: Do we need to limit this to specific machines,
+ *       or examine the contents of the sequences to
+ *       avoid false positives?
+ */
+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+				     struct intel_panel *panel)
+{
+	if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
+	    panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
+		drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+
+		swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
+		     panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
+	}
+}
+
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+				 struct intel_panel *panel)
+{
+	if (DISPLAY_VER(i915) >= 11)
+		icl_fixup_mipi_sequences(i915, panel);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_fixup_mipi_sequences(i915, panel);
+}
+
 static void
 parse_mipi_sequence(struct drm_i915_private *i915,
 		    struct intel_panel *panel)
@@ -3351,6 +3382,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)
 {
 	const struct child_device_config *child = &devdata->child;
 
+	if (!devdata)
+		return false;
+
 	if (!intel_bios_encoder_supports_dp(devdata) ||
 	    !intel_bios_encoder_supports_hdmi(devdata))
 		return false;
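Worth noting about the hunk above: the pre-existing `&devdata->child` only computes an address and is never dereferenced before the new `!devdata` guard, which is why the early return is enough to stop the NULL-pointer crash. A minimal model of the guard pattern, with hypothetical types that merely stand in for the i915 ones:

#include <stdbool.h>
#include <stddef.h>

struct child_cfg { int flags; };
struct encoder_data { struct child_cfg child; };

/* Return false instead of crashing when no VBT data exists for the port. */
static bool supports_dp_dual_mode(const struct encoder_data *devdata)
{
	if (!devdata)
		return false;

	return devdata->child.flags != 0;
}

int main(void)
{
	return supports_dp_dual_mode(NULL) ? 1 : 0;	/* safely returns 0 */
}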
@@ -36,12 +36,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
 		to_i915(plane_state->uapi.plane->dev);
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	u32 base;
 
 	if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
-		base = i915_gem_object_get_dma_address(obj, 0);
+		base = plane_state->phys_dma_addr;
 	else
 		base = intel_plane_ggtt_offset(plane_state);
 
@@ -727,6 +727,7 @@ struct intel_plane_state {
 #define PLANE_HAS_FENCE BIT(0)
 
 	struct intel_fb_view view;
+	u32 phys_dma_addr; /* for cursor_needs_physical */
 
 	/* Plane pxp decryption state */
 	bool decrypt;
@@ -67,6 +67,7 @@
 #include "intel_dp_tunnel.h"
 #include "intel_dpio_phy.h"
 #include "intel_dpll.h"
+#include "intel_drrs.h"
 #include "intel_fifo_underrun.h"
 #include "intel_hdcp.h"
 #include "intel_hdmi.h"
@@ -2683,15 +2684,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
 		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
 }
 
-static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
-				    enum transcoder cpu_transcoder)
-{
-	if (HAS_DOUBLE_BUFFERED_M_N(i915))
-		return true;
-
-	return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
-}
-
 static bool can_enable_drrs(struct intel_connector *connector,
 			    const struct intel_crtc_state *pipe_config,
 			    const struct drm_display_mode *downclock_mode)
@@ -2714,7 +2706,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
 	if (pipe_config->has_pch_encoder)
 		return false;
 
-	if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+	if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
 		return false;
 
 	return downclock_mode &&
@@ -2554,7 +2554,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
 static bool
 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
 {
-	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+	return ((IS_ELKHARTLAKE(i915) &&
 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
 		IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
 		i915->display.dpll.ref_clks.nssc == 38400;
@@ -63,6 +63,15 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
 	return str[drrs_type];
 }
 
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+				   enum transcoder cpu_transcoder)
+{
+	if (HAS_DOUBLE_BUFFERED_M_N(i915))
+		return true;
+
+	return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+}
+
 static void
 intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
 				     enum drrs_refresh_rate refresh_rate)
@@ -312,9 +321,8 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
 	mutex_lock(&crtc->drrs.mutex);
 
 	seq_printf(m, "DRRS capable: %s\n",
-		   str_yes_no(crtc_state->has_drrs ||
-			      HAS_DOUBLE_BUFFERED_M_N(i915) ||
-			      intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
+		   str_yes_no(intel_cpu_transcoder_has_drrs(i915,
+							    crtc_state->cpu_transcoder)));
 
 	seq_printf(m, "DRRS enabled: %s\n",
 		   str_yes_no(crtc_state->has_drrs));
@@ -9,12 +9,15 @@
 #include <linux/types.h>
 
 enum drrs_type;
+enum transcoder;
 struct drm_i915_private;
 struct intel_atomic_state;
 struct intel_crtc;
 struct intel_crtc_state;
 struct intel_connector;
 
+bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+				   enum transcoder cpu_transcoder);
 const char *intel_drrs_type_str(enum drrs_type drrs_type);
 bool intel_drrs_is_active(struct intel_crtc *crtc);
 void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
@@ -340,6 +340,17 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
 	return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
 }
 
+static u32 dsb_chicken(struct intel_crtc *crtc)
+{
+	if (crtc->mode_flags & I915_MODE_FLAG_VRR)
+		return DSB_CTRL_WAIT_SAFE_WINDOW |
+			DSB_CTRL_NO_WAIT_VBLANK |
+			DSB_INST_WAIT_SAFE_WINDOW |
+			DSB_INST_NO_WAIT_VBLANK;
+	else
+		return 0;
+}
+
 static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
 			      int dewake_scanline)
 {
@@ -361,6 +372,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
 	intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
 			  ctrl | DSB_ENABLE);
 
+	intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id),
+			  dsb_chicken(crtc));
+
 	intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
 			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
 
@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 			return PTR_ERR(vma);
 
 		plane_state->ggtt_vma = vma;
+
+		/*
+		 * Pre-populate the dma address before we enter the vblank
+		 * evade critical section as i915_gem_object_get_dma_address()
+		 * will trigger might_sleep() even if it won't actually sleep,
+		 * which is the case when the fb has already been pinned.
+		 */
+		if (phys_cursor)
+			plane_state->phys_dma_addr =
+				i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
 	} else {
 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
@@ -1842,8 +1842,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	u32 temp;
 
-	encoder->audio_disable(encoder, old_crtc_state, conn_state);
-
 	intel_sdvo_set_active_outputs(intel_sdvo, 0);
 	if (0)
 		intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1935,8 +1933,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
 		intel_sdvo_set_encoder_power_state(intel_sdvo,
 						   DRM_MODE_DPMS_ON);
 	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
-
-	encoder->audio_enable(encoder, pipe_config, conn_state);
 }
 
 static enum drm_mode_status
@@ -187,10 +187,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
 	/*
-	 * TRANS_SET_CONTEXT_LATENCY with VRR enabled
-	 * requires this chicken bit on ADL/DG2.
+	 * This bit seems to have two meanings depending on the platform:
+	 * TGL: generate VRR "safe window" for DSB vblank waits
+	 * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
 	 */
-	if (DISPLAY_VER(dev_priv) == 13)
+	if (IS_DISPLAY_VER(dev_priv, 12, 13))
 		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
 			     0, PIPE_VBLANK_WITH_DELAY);
 
@@ -2295,6 +2295,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
 	if (HAS_4TILE(i915))
 		caps |= INTEL_PLANE_CAP_TILING_4;
 
+	if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
+		return caps;
+
 	if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
 		caps |= INTEL_PLANE_CAP_CCS_RC;
 		if (DISPLAY_VER(i915) >= 12)
@@ -279,9 +279,6 @@ static int __engine_park(struct intel_wakeref *wf)
 	intel_engine_park_heartbeat(engine);
 	intel_breadcrumbs_park(engine->breadcrumbs);
 
-	/* Must be reset upon idling, or we may miss the busy wakeup. */
-	GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
-
 	if (engine->park)
 		engine->park(engine);
 
@@ -3272,6 +3272,9 @@ static void execlists_park(struct intel_engine_cs *engine)
 {
 	cancel_timer(&engine->execlists.timer);
 	cancel_timer(&engine->execlists.preempt);
+
+	/* Reset upon idling, or we may delay the busy wakeup. */
+	WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
 }
 
 static void add_to_engine(struct i915_request *rq)
@@ -1653,6 +1653,7 @@ static void
 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 {
 	/* Wa_14018575942 / Wa_18018781329 */
+	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
 	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
 
 	/* Wa_22016670082 */
@@ -800,7 +800,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_cleanup_modeset2;
 
 	ret = intel_pxp_init(i915);
-	if (ret != -ENODEV)
+	if (ret && ret != -ENODEV)
 		drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
 
 	ret = intel_display_driver_probe(i915);
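The one-character fix above is a common idiom for optional subsystems: 0 means success and -ENODEV means "not present", so only other codes deserve a log line; the old condition logged "failed with 0" on success. A hedged user-space sketch of the same pattern (hypothetical helper, not the i915 API):

#include <errno.h>
#include <stdio.h>

/* Hypothetical optional-feature init: 0 = ok, -ENODEV = not present. */
static int optional_init(int present, int broken)
{
	if (!present)
		return -ENODEV;
	return broken ? -EIO : 0;
}

int main(void)
{
	int ret = optional_init(1, 0);

	/* Log only real failures, not success (0) or absence (-ENODEV). */
	if (ret && ret != -ENODEV)
		fprintf(stderr, "init failed with %d\n", ret);
	return 0;
}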
@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
 	struct intel_uncore *uncore = ddat->uncore;
 	intel_wakeref_t wakeref;
 
-	mutex_lock(&hwmon->hwmon_lock);
+	with_intel_runtime_pm(uncore->rpm, wakeref) {
+		mutex_lock(&hwmon->hwmon_lock);
 
-	with_intel_runtime_pm(uncore->rpm, wakeref)
 		intel_uncore_rmw(uncore, reg, clear, set);
 
-	mutex_unlock(&hwmon->hwmon_lock);
+		mutex_unlock(&hwmon->hwmon_lock);
+	}
 }
 
 /*
@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
 	else
 		rgaddr = hwmon->rg.energy_status_all;
 
-	mutex_lock(&hwmon->hwmon_lock);
+	with_intel_runtime_pm(uncore->rpm, wakeref) {
+		mutex_lock(&hwmon->hwmon_lock);
 
-	with_intel_runtime_pm(uncore->rpm, wakeref)
 		reg_val = intel_uncore_read(uncore, rgaddr);
 
-	if (reg_val >= ei->reg_val_prev)
-		ei->accum_energy += reg_val - ei->reg_val_prev;
-	else
-		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
-	ei->reg_val_prev = reg_val;
+		if (reg_val >= ei->reg_val_prev)
+			ei->accum_energy += reg_val - ei->reg_val_prev;
+		else
+			ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+		ei->reg_val_prev = reg_val;
 
-	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
-				  hwmon->scl_shift_energy);
-	mutex_unlock(&hwmon->hwmon_lock);
+		*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+					  hwmon->scl_shift_energy);
+		mutex_unlock(&hwmon->hwmon_lock);
+	}
 }
 
 static ssize_t
@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
 
 	/* Block waiting for GuC reset to complete when needed */
 	for (;;) {
+		wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
 		mutex_lock(&hwmon->hwmon_lock);
 
 		prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
 		}
 
 		mutex_unlock(&hwmon->hwmon_lock);
+		intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
 
 		schedule();
 	}
 	finish_wait(&ddat->waitq, &wait);
 	if (ret)
-		goto unlock;
-
-	wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+		goto exit;
 
 	/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
 	if (val == PL1_DISABLE) {
@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
 	intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
 			 PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
+exit:
 	intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
-unlock:
-	mutex_unlock(&hwmon->hwmon_lock);
 	return ret;
 }
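The theme of the hwmon hunks above is lock ordering: runtime-pm resume can sleep and take other locks, so the wakeref is now always acquired before hwmon_lock rather than inside it, giving every path the same A-then-B order. A minimal pthread sketch of that rule, with stand-in names (rpm and hwmon_lock here are ordinary mutexes, not the kernel primitives):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins: "rpm" must always be taken before "hwmon_lock",
 * mirroring the reordering in the hunks above. */
static pthread_mutex_t rpm = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hwmon_lock = PTHREAD_MUTEX_INITIALIZER;

static int read_sensor(void)
{
	int val;

	pthread_mutex_lock(&rpm);		/* wake the device first */
	pthread_mutex_lock(&hwmon_lock);	/* then serialize readers */
	val = 42;				/* register read goes here */
	pthread_mutex_unlock(&hwmon_lock);
	pthread_mutex_unlock(&rpm);
	return val;
}

int main(void)
{
	printf("sensor: %d\n", read_sensor());
	return 0;
}

As long as no path takes hwmon_lock and then tries to wake the device, the inversion (and the lockdep splat it produces) cannot occur.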
@@ -25,6 +25,8 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/cpufeature.h>
+#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <asm/fpu/api.h>
 
 #include "i915_memcpy.h"
@@ -4599,7 +4599,7 @@
 #define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
 					     _MTL_CHICKEN_TRANS_A, \
 					     _MTL_CHICKEN_TRANS_B)
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
 #define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
 #define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
 #define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
@@ -34,6 +34,7 @@
 #include "gt/intel_engine.h"
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_tlb.h"
 
@@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
 
 static int __i915_vma_active(struct i915_active *ref)
 {
-	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+	struct i915_vma *vma = active_to_vma(ref);
+
+	if (!i915_vma_tryget(vma))
+		return -ENOENT;
+
+	/*
+	 * Exclude global GTT VMA from holding a GT wakeref
+	 * while active, otherwise GPU never goes idle.
+	 */
+	if (!i915_vma_is_ggtt(vma)) {
+		/*
+		 * Since we and our _retire() counterpart can be
+		 * called asynchronously, storing a wakeref tracking
+		 * handle inside struct i915_vma is not safe, and
+		 * there is no other good place for that.  Hence,
+		 * use untracked variants of intel_gt_pm_get/put().
+		 */
+		intel_gt_pm_get_untracked(vma->vm->gt);
+	}
+
+	return 0;
 }
 
 static void __i915_vma_retire(struct i915_active *ref)
 {
-	i915_vma_put(active_to_vma(ref));
+	struct i915_vma *vma = active_to_vma(ref);
+
+	if (!i915_vma_is_ggtt(vma)) {
+		/*
+		 * Since we can be called from atomic contexts,
+		 * use an async variant of intel_gt_pm_put().
+		 */
+		intel_gt_pm_put_async_untracked(vma->vm->gt);
+	}
+
+	i915_vma_put(vma);
 }
 
 static struct i915_vma *
@@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	struct i915_vma_work *work = NULL;
 	struct dma_fence *moving = NULL;
 	struct i915_vma_resource *vma_res = NULL;
-	intel_wakeref_t wakeref = 0;
+	intel_wakeref_t wakeref;
 	unsigned int bound;
 	int err;
 
@@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	if (err)
 		return err;
 
-	if (flags & PIN_GLOBAL)
-		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+	/*
+	 * In case of a global GTT, we must hold a runtime-pm wakeref
+	 * while global PTEs are updated.  In other cases, we hold
+	 * the rpm reference while the VMA is active.  Since runtime
+	 * resume may require allocations, which are forbidden inside
+	 * vm->mutex, get the first rpm wakeref outside of the mutex.
+	 */
+	wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
 
 	if (flags & vma->vm->bind_async_flags) {
 		/* lock VM */
@@ -1561,8 +1598,7 @@ err_fence:
 	if (work)
 		dma_fence_work_commit_imm(&work->base);
 err_rpm:
-	if (wakeref)
-		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+	intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
 
 	if (moving)
 		dma_fence_put(moving);
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 	dma_addr_t *dma_addrs;
 	struct nouveau_fence *fence;
 
-	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
-	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
-	dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+	dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
 
 	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
 			     npages);
@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 	migrate_device_pages(src_pfns, dst_pfns, npages);
 	nouveau_dmem_fence_done(&fence);
 	migrate_device_finalize(src_pfns, dst_pfns, npages);
-	kfree(src_pfns);
-	kfree(dst_pfns);
+	kvfree(src_pfns);
+	kvfree(dst_pfns);
 	for (i = 0; i < npages; i++)
 		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
-	kfree(dma_addrs);
+	kvfree(dma_addrs);
 }
 
 void
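Two details in the nouveau hunks generalize. First, allocations from the kvmalloc family must be released with kvfree(), never kfree(), since the memory may be vmalloc-backed. Second, calloc-style allocators take (count, size) as separate arguments precisely so the multiplication can be overflow-checked before allocating. A user-space sketch of that second point (the kernel performs the equivalent check inside kvcalloc()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* calloc-style guard: reject count * size overflow before allocating. */
static void *checked_alloc(size_t count, size_t size)
{
	if (size && count > SIZE_MAX / size)
		return NULL;	/* multiplication would overflow */
	return calloc(count, size);
}

int main(void)
{
	void *p = checked_alloc(SIZE_MAX / 2, 4);	/* overflows: NULL */

	printf("overflowing request -> %p\n", p);
	free(p);
	return 0;
}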
@@ -421,7 +421,6 @@ int qxl_surface_id_alloc(struct qxl_device *qdev,
 {
 	uint32_t handle;
 	int idr_ret;
-	int count = 0;
 again:
 	idr_preload(GFP_ATOMIC);
 	spin_lock(&qdev->surf_id_idr_lock);
@@ -433,7 +432,6 @@ again:
 	handle = idr_ret;
 
 	if (handle >= qdev->rom->n_surfaces) {
-		count++;
 		spin_lock(&qdev->surf_id_idr_lock);
 		idr_remove(&qdev->surf_id_idr, handle);
 		spin_unlock(&qdev->surf_id_idr_lock);
@@ -145,7 +145,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 	struct qxl_release *release;
 	struct qxl_bo *cmd_bo;
 	void *fb_cmd;
-	int i, ret, num_relocs;
+	int i, ret;
 	int unwritten;
 
 	switch (cmd->type) {
@@ -200,7 +200,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 	}
 
 	/* fill out reloc info structs */
-	num_relocs = 0;
 	for (i = 0; i < cmd->relocs_num; ++i) {
 		struct drm_qxl_reloc reloc;
 		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
@@ -230,7 +229,6 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 			reloc_info[i].dst_bo = cmd_bo;
 			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
 		}
-		num_relocs++;
 
 		/* reserve and validate the reloc dst bo */
 		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
@@ -17,9 +17,7 @@
 
 static const uint32_t formats_cluster[] = {
 	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_ARGB2101010,
 	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_ABGR2101010,
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_ARGB8888,
 	DRM_FORMAT_XBGR8888,
@@ -71,13 +71,19 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	entity->guilty = guilty;
 	entity->num_sched_list = num_sched_list;
 	entity->priority = priority;
+	/*
+	 * It's perfectly valid to initialize an entity without having a valid
+	 * scheduler attached. It's just not valid to use the scheduler before it
+	 * is initialized itself.
+	 */
 	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
 	RCU_INIT_POINTER(entity->last_scheduled, NULL);
 	RB_CLEAR_NODE(&entity->rb_tree_node);
 
-	if (!sched_list[0]->sched_rq) {
-		/* Warn drivers not to do this and to fix their DRM
-		 * calling order.
+	if (num_sched_list && !sched_list[0]->sched_rq) {
+		/* Since every entry covered by num_sched_list
+		 * should be non-NULL and therefore we warn drivers
+		 * not to do this and to fix their DRM calling order.
 		 */
 		pr_warn("%s: called with uninitialized scheduler\n", __func__);
 	} else if (num_sched_list) {
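The NULL-pointer deref fixed above came from indexing sched_list[0] when num_sched_list could be 0: an empty list is legal at init time and must not be dereferenced at all. The guard pattern in a minimal form, with hypothetical names standing in for the drm_sched types:

#include <stddef.h>
#include <stdio.h>

struct sched { int ready; };

/* Tolerate an empty scheduler list at init time; only reject a
 * non-empty list whose first entry is not initialized. */
static int entity_init(struct sched **list, unsigned int n)
{
	if (n && !list[0]->ready) {
		fprintf(stderr, "called with uninitialized scheduler\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	/* n == 0: list may be anything, even NULL -- it must not be touched. */
	return entity_init(NULL, 0);
}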
@@ -1444,12 +1444,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
 					    root, "system_ttm");
 	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
 					    root, "vram_ttm");
-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
-					    root, "gmr_ttm");
-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
-					    root, "mob_ttm");
-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
-					    root, "system_mob_ttm");
+	if (vmw->has_gmr)
+		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+						    root, "gmr_ttm");
+	if (vmw->has_mob) {
+		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+						    root, "mob_ttm");
+		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+						    root, "system_mob_ttm");
+	}
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -144,9 +144,6 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
 			.mem_type = XE_PL_TT,
 		};
 		*c += 1;
-
-		if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
-			bo->props.preferred_mem_type = XE_PL_TT;
 	}
 }
 
@@ -181,25 +178,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 	}
 	places[*c] = place;
 	*c += 1;
-
-	if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
-		bo->props.preferred_mem_type = mem_type;
 }
 
 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
 			 u32 bo_flags, u32 *c)
 {
-	if (bo->props.preferred_gt == XE_GT1) {
-		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
-			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
-		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
-			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
-	} else {
-		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
-			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
-		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
-			add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
-	}
+	if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
+		add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
+	if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
+		add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
 }
 
 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
@@ -223,17 +210,8 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 {
 	u32 c = 0;
 
-	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
-
-	/* The order of placements should indicate preferred location */
-
-	if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
-		try_add_system(xe, bo, bo_flags, &c);
-		try_add_vram(xe, bo, bo_flags, &c);
-	} else {
-		try_add_vram(xe, bo, bo_flags, &c);
-		try_add_system(xe, bo, bo_flags, &c);
-	}
+	try_add_vram(xe, bo, bo_flags, &c);
+	try_add_system(xe, bo, bo_flags, &c);
 	try_add_stolen(xe, bo, bo_flags, &c);
 
 	if (!c)
@@ -1126,13 +1104,6 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
 		}
 	}
 
-static bool should_migrate_to_system(struct xe_bo *bo)
-{
-	struct xe_device *xe = xe_bo_device(bo);
-
-	return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
-}
-
 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
@@ -1141,7 +1112,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 	struct xe_bo *bo = ttm_to_xe_bo(tbo);
 	bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
 	vm_fault_t ret;
-	int idx, r = 0;
+	int idx;
 
 	if (needs_rpm)
 		xe_device_mem_access_get(xe);
@@ -1153,17 +1124,8 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 	if (drm_dev_enter(ddev, &idx)) {
 		trace_xe_bo_cpu_fault(bo);
 
-		if (should_migrate_to_system(bo)) {
-			r = xe_bo_migrate(bo, XE_PL_TT);
-			if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
-				ret = VM_FAULT_NOPAGE;
-			else if (r)
-				ret = VM_FAULT_SIGBUS;
-		}
-		if (!ret)
-			ret = ttm_bo_vm_fault_reserved(vmf,
-						       vmf->vma->vm_page_prot,
-						       TTM_BO_VM_NUM_PREFAULT);
+		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+					       TTM_BO_VM_NUM_PREFAULT);
 		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
@@ -1291,9 +1253,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 	bo->flags = flags;
 	bo->cpu_caching = cpu_caching;
 	bo->ttm.base.funcs = &xe_gem_object_funcs;
-	bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
-	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
-	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
 	bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
 	INIT_LIST_HEAD(&bo->pinned_link);
 #ifdef CONFIG_PROC_FS
@@ -56,25 +56,6 @@ struct xe_bo {
 	 */
 	struct list_head client_link;
 #endif
-	/** @props: BO user controlled properties */
-	struct {
-		/** @preferred_mem: preferred memory class for this BO */
-		s16 preferred_mem_class;
-		/** @prefered_gt: preferred GT for this BO */
-		s16 preferred_gt;
-		/** @preferred_mem_type: preferred memory type */
-		s32 preferred_mem_type;
-		/**
-		 * @cpu_atomic: the CPU expects to do atomics operations to
-		 * this BO
-		 */
-		bool cpu_atomic;
-		/**
-		 * @device_atomic: the device expects to do atomics operations
-		 * to this BO
-		 */
-		bool device_atomic;
-	} props;
 	/** @freed: List node for delayed put. */
 	struct llist_node freed;
 	/** @created: Whether the bo has passed initial creation */
@@ -58,7 +58,7 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
 
 static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
 {
-	if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id > XE_MAX_GT_PER_TILE))
+	if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
 		gt_id = 0;
 
 	return gt_id ? tile->media_gt : tile->primary_gt;
@@ -79,7 +79,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
 	if (MEDIA_VER(xe) >= 13) {
 		gt = xe_tile_get_gt(root_tile, gt_id);
 	} else {
-		if (drm_WARN_ON(&xe->drm, gt_id > XE_MAX_TILES_PER_DEVICE))
+		if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
 			gt_id = 0;
 
 		gt = xe->tiles[gt_id].primary_gt;
@@ -448,7 +448,7 @@ find_hw_engine(struct xe_device *xe,
 {
 	u32 idx;
 
-	if (eci.engine_class > ARRAY_SIZE(user_to_xe_engine_class))
+	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
 		return NULL;
 
 	if (eci.gt_id >= xe->info.gt_count)
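These `>` to `>=` changes (the "wrong bound checks" in the xe summary) are the classic off-by-one: an array of N entries has valid indices 0..N-1, so index == N must be rejected too. A self-contained demonstration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const class_names[] = { "render", "copy", "video" };

static const char *lookup(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(class_names))	/* '>' would let idx == 3 through */
		return NULL;
	return class_names[idx];
}

int main(void)
{
	printf("%s\n", lookup(2) ? lookup(2) : "invalid");	/* video */
	printf("%s\n", lookup(3) ? lookup(3) : "invalid");	/* invalid */
	return 0;
}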
@@ -1220,7 +1220,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	init_waitqueue_head(&ge->suspend_wait);
 
 	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
-		q->sched_props.job_timeout_ms;
+		msecs_to_jiffies(q->sched_props.job_timeout_ms);
 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
 			    get_submit_wq(guc),
 			    q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
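The bug fixed above is a unit mismatch: the scheduler expects a timeout in jiffies (ticks of CONFIG_HZ), not milliseconds, so a 5000 ms value passed raw on an HZ=250 kernel would mean 20 seconds. A small user-space model of the conversion, with a hypothetical HZ standing in for the kernel's configured tick rate:

#include <stdio.h>

#define HZ 250	/* hypothetical tick rate; the kernel's is CONFIG_HZ */

/* Simplified msecs_to_jiffies(): round up to whole ticks. */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	unsigned int timeout_ms = 5000;

	/* Correct: 5000 ms -> 1250 ticks. Passing 5000 raw would mean 20 s. */
	printf("%u ms = %lu jiffies at HZ=%d\n",
	       timeout_ms, msecs_to_jiffies(timeout_ms), HZ);
	return 0;
}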
@@ -97,7 +97,6 @@ static void set_offsets(u32 *regs,
 #define REG16(x) \
 	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
 	(((x) >> 2) & 0x7f)
-#define END 0
 {
 	const u32 base = hwe->mmio_base;
 
@@ -168,7 +167,7 @@ static const u8 gen12_xcs_offsets[] = {
 	REG16(0x274),
 	REG16(0x270),
 
-	END
+	0
 };
 
 static const u8 dg2_xcs_offsets[] = {
@@ -202,7 +201,7 @@ static const u8 dg2_xcs_offsets[] = {
 	REG16(0x274),
 	REG16(0x270),
 
-	END
+	0
 };
 
 static const u8 gen12_rcs_offsets[] = {
@@ -298,7 +297,7 @@ static const u8 gen12_rcs_offsets[] = {
 	REG(0x084),
 	NOP(1),
 
-	END
+	0
 };
 
 static const u8 xehp_rcs_offsets[] = {
@@ -339,7 +338,7 @@ static const u8 xehp_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END
+	0
 };
 
 static const u8 dg2_rcs_offsets[] = {
@@ -382,7 +381,7 @@ static const u8 dg2_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END
+	0
 };
 
 static const u8 mtl_rcs_offsets[] = {
@@ -425,7 +424,7 @@ static const u8 mtl_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END
+	0
 };
 
 #define XE2_CTX_COMMON \
@@ -471,7 +470,7 @@ static const u8 xe2_rcs_offsets[] = {
 	LRI(1, 0),		/* [0x47] */
 	REG(0x0c8),		/* [0x48] R_PWR_CLK_STATE */
 
-	END
+	0
 };
 
 static const u8 xe2_bcs_offsets[] = {
@@ -482,16 +481,15 @@ static const u8 xe2_bcs_offsets[] = {
 	REG16(0x200),		/* [0x42] BCS_SWCTRL */
 	REG16(0x204),		/* [0x44] BLIT_CCTL */
 
-	END
+	0
 };
 
 static const u8 xe2_xcs_offsets[] = {
 	XE2_CTX_COMMON,
 
-	END
+	0
 };
 
-#undef END
 #undef REG16
 #undef REG
 #undef LRI
@@ -132,7 +132,7 @@ query_engine_cycles(struct xe_device *xe,
 		return -EINVAL;
 
 	eci = &resp.eci;
-	if (eci->gt_id > XE_MAX_GT_PER_TILE)
+	if (eci->gt_id >= XE_MAX_GT_PER_TILE)
 		return -EINVAL;
 
 	gt = xe_device_get_gt(xe, eci->gt_id);
@@ -494,6 +494,7 @@ config FB_SBUS_HELPERS
 	select FB_CFB_COPYAREA
 	select FB_CFB_FILLRECT
 	select FB_CFB_IMAGEBLIT
+	select FB_IOMEM_FOPS
 
 config FB_BW2
 	bool "BWtwo support"
@@ -514,6 +515,7 @@ config FB_CG6
 	depends on (FB = y) && (SPARC && FB_SBUS)
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_IOMEM_FOPS
 	help
 	  This is the frame buffer device driver for the CGsix (GX, TurboGX)
 	  frame buffer.
@@ -523,6 +525,7 @@ config FB_FFB
 	depends on FB_SBUS && SPARC64
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_IOMEM_FOPS
 	help
 	  This is the frame buffer device driver for the Creator, Creator3D,
 	  and Elite3D graphics boards.
@@ -913,14 +913,25 @@ enum kfd_dbg_trap_exception_code {
 				 KFD_EC_MASK(EC_DEVICE_NEW))
 #define KFD_EC_MASK_PROCESS	(KFD_EC_MASK(EC_PROCESS_RUNTIME) |	\
 				 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
+#define KFD_EC_MASK_PACKET	(KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
+				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
+				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
+				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
+				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
+				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
+				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
+				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
 
 /* Checks for exception code types for KFD search */
+#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
 #define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)					\
-			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
+			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
 #define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)				\
-			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
+			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
 #define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)				\
-			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
+			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
+#define KFD_DBG_EC_TYPE_IS_PACKET(ecode)				\
+			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
 
 
 /* Runtime enable states */
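The KFD_DBG_EC_IS_VALID() guard matters because KFD_EC_MASK() builds its bitmask by shifting (in the kernel header it expands to a 1ULL shift by ecode - 1), and shifting by a negative or out-of-range amount is undefined behavior in C. A sketch of the guarded-shift idiom, using hypothetical enum values rather than the real KFD codes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum ec { EC_NONE = 0, EC_A = 1, EC_B = 2, EC_MAX = 3 };

#define EC_MASK(e)     (1ULL << ((e) - 1))
#define EC_IS_VALID(e) ((e) > EC_NONE && (e) < EC_MAX)

/* Shift only after validating; EC_MASK(EC_NONE) would shift by -1 (UB). */
static bool ec_in_mask(int e, uint64_t mask)
{
	return EC_IS_VALID(e) && (EC_MASK(e) & mask) != 0;
}

int main(void)
{
	uint64_t mask = EC_MASK(EC_A);

	printf("EC_A in mask: %d\n", ec_in_mask(EC_A, mask));	/* 1 */
	printf("EC_NONE in mask: %d\n", ec_in_mask(0, mask));	/* 0, no UB */
	return 0;
}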