drm fixes for 6.5-rc2

Merge tag 'drm-fixes-2023-07-21' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Mostly amdgpu fixes, a couple of i915 fixes, some nouveau and then a
  few misc accel and other fixes.

  client:
   - memory leak fix

  dma-buf:
   - memory leak fix

  qaic:
   - bound check fixes
   - map_user_pages leak
   - int overflow fixes

  habanalabs:
   - debugfs stub helper

  nouveau:
   - aux event slot fixes
   - anx9805 cards fixes

  i915:
   - Add sentinel to xehp_oa_b_counters
   - Revert "drm/i915: use localized __diag_ignore_all() instead of per
     file"

  amdgpu:
   - More PCIe DPM fixes for Intel platforms
   - DCN3.0.1 fixes
   - Virtual display timer fix
   - Async flip fix
   - SMU13 clock reporting fixes
   - Add missing PSP firmware declaration
   - DP MST fix
   - DCN3.1.x fixes
   - Slab out of bounds fix"

* tag 'drm-fixes-2023-07-21' of git://anongit.freedesktop.org/drm/drm: (31 commits)
  accel/habanalabs: add more debugfs stub helpers
  drm/nouveau/kms/nv50-: init hpd_irq_lock for PIOR DP
  drm/nouveau/disp: PIOR DP uses GPIO for HPD, not PMGR AUX interrupts
  drm/nouveau/i2c: fix number of aux event slots
  drm/amdgpu: use a macro to define no xcp partition case
  drm/amdgpu/vm: use the same xcp_id from root PD
  drm/amdgpu: fix slab-out-of-bounds issue in amdgpu_vm_pt_create
  drm/amdgpu: Allocate root PD on correct partition
  drm/amd/display: Keep PHY active for DP displays on DCN31
  drm/amd/display: Prevent vtotal from being set to 0
  drm/amd/display: Disable MPC split by default on special asic
  drm/amd/display: check TG is non-null before checking if enabled
  drm/amd/display: Add polling method to handle MST reply packet
  drm/amd/display: Clean up errors & warnings in amdgpu_dm.c
  drm/amdgpu: Allow the initramfs generator to include psp_13_0_6_ta
  drm/amdgpu/pm: make mclk consistent for smu 13.0.7
  drm/amdgpu/pm: make gfxclock consistent for sienna cichlid
  drm/amd/display: only accept async flips for fast updates
  drm/amdgpu/vkms: relax timer deactivation by hrtimer_try_to_cancel
  drm/amd/display: add DCN301 specific logic for OTG programming
  ...
Linus Torvalds 2023-07-20 20:35:38 -07:00
commit f7e3a1bafd
42 changed files with 612 additions and 242 deletions

View File

@@ -3980,6 +3980,15 @@ static inline void hl_debugfs_fini(void)
 {
 }
 
+static inline int hl_debugfs_device_init(struct hl_device *hdev)
+{
+	return 0;
+}
+
+static inline void hl_debugfs_device_fini(struct hl_device *hdev)
+{
+}
+
 static inline void hl_debugfs_add_device(struct hl_device *hdev)
 {
 }
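
The stubs above follow the standard kernel pattern for compiled-out debugfs support: static inline no-ops stand in for the real entry points so callers never need #ifdef guards. A minimal sketch of the pattern (the surrounding #ifdef CONFIG_DEBUG_FS split is assumed from common kernel practice; the hunk shows only the stub side):

#ifdef CONFIG_DEBUG_FS
int hl_debugfs_device_init(struct hl_device *hdev);
void hl_debugfs_device_fini(struct hl_device *hdev);
#else
/* No-op stubs keep callers building when debugfs is compiled out. */
static inline int hl_debugfs_device_init(struct hl_device *hdev)
{
	return 0;
}

static inline void hl_debugfs_device_fini(struct hl_device *hdev)
{
}
#endif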

View File

@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
+#include <linux/overflow.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
@@ -366,7 +367,7 @@ static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrap
 	if (in_trans->hdr.len % 8 != 0)
 		return -EINVAL;
 
-	if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_EXT_MSG_LENGTH)
+	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
 		return -ENOSPC;
 
 	trans_wrapper = add_wrapper(wrappers,
@@ -418,9 +419,12 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
 	}
 
 	ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
-	if (ret < 0 || ret != nr_pages) {
-		ret = -EFAULT;
+	if (ret < 0)
 		goto free_page_list;
+	if (ret != nr_pages) {
+		nr_pages = ret;
+		ret = -EFAULT;
+		goto put_pages;
 	}
 
 	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
@@ -557,11 +561,8 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
 	msg = &wrapper->msg;
 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
 
-	if (msg_hdr_len > (UINT_MAX - QAIC_MANAGE_EXT_MSG_LENGTH))
-		return -EINVAL;
-
 	/* There should be enough space to hold at least one ASP entry. */
-	if (msg_hdr_len + sizeof(*out_trans) + sizeof(struct wire_addr_size_pair) >
+	if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
 	    QAIC_MANAGE_EXT_MSG_LENGTH)
 		return -ENOMEM;
@@ -634,7 +635,7 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
 	msg = &wrapper->msg;
 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
 
-	if (msg_hdr_len + sizeof(*out_trans) > QAIC_MANAGE_MAX_MSG_LENGTH)
+	if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
 		return -ENOSPC;
 
 	if (!in_trans->queue_size)
@@ -718,7 +719,7 @@ static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_l
 	msg = &wrapper->msg;
 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
 
-	if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_MAX_MSG_LENGTH)
+	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
 		return -ENOSPC;
 
 	trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
@@ -748,7 +749,8 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
 	int ret;
 	int i;
 
-	if (!user_msg->count) {
+	if (!user_msg->count ||
+	    user_msg->len < sizeof(*trans_hdr)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -765,12 +767,13 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
 	}
 
 	for (i = 0; i < user_msg->count; ++i) {
-		if (user_len >= user_msg->len) {
+		if (user_len > user_msg->len - sizeof(*trans_hdr)) {
 			ret = -EINVAL;
 			break;
 		}
 		trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
-		if (user_len + trans_hdr->len > user_msg->len) {
+		if (trans_hdr->len < sizeof(trans_hdr) ||
+		    size_add(user_len, trans_hdr->len) > user_msg->len) {
 			ret = -EINVAL;
 			break;
 		}
@@ -953,15 +956,23 @@ static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
 	int ret;
 	int i;
 
-	if (msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
+	if (msg_hdr_len < sizeof(*trans_hdr) ||
+	    msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
 		return -EINVAL;
 
 	user_msg->len = 0;
 	user_msg->count = le32_to_cpu(msg->hdr.count);
 
 	for (i = 0; i < user_msg->count; ++i) {
+		u32 hdr_len;
+
+		if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
+			return -EINVAL;
+
 		trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
-		if (msg_len + le32_to_cpu(trans_hdr->len) > msg_hdr_len)
+		hdr_len = le32_to_cpu(trans_hdr->len);
+		if (hdr_len < sizeof(*trans_hdr) ||
+		    size_add(msg_len, hdr_len) > msg_hdr_len)
 			return -EINVAL;
 
 		switch (le32_to_cpu(trans_hdr->type)) {
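
All of the length checks above now go through size_add() from <linux/overflow.h>, which saturates to SIZE_MAX on overflow so a following "> limit" comparison fails closed instead of wrapping around; the find_and_map_user_pages() hunk additionally makes a short get_user_pages_fast() result release the pages that were pinned (goto put_pages) instead of leaking them. A standalone sketch of the saturating-add idiom, using the compiler builtin that the kernel's check_add_overflow() wraps:

#include <stddef.h>
#include <stdint.h>

static size_t size_add_sketch(size_t a, size_t b)
{
	size_t sum;

	/* On overflow, saturate so a later "sum > LIMIT" check always trips. */
	if (__builtin_add_overflow(a, b, &sum))
		return SIZE_MAX;

	return sum;
}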

View File

@@ -571,6 +571,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
+			struct dma_fence **new_fences;
 			unsigned int count;
 
 			while (*num_fences)
@@ -579,13 +580,17 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 			count = cursor.num_fences + 1;
 
 			/* Eventually re-allocate the array */
-			*fences = krealloc_array(*fences, count,
-						 sizeof(void *),
-						 GFP_KERNEL);
-			if (count && !*fences) {
+			new_fences = krealloc_array(*fences, count,
+						    sizeof(void *),
+						    GFP_KERNEL);
+			if (count && !new_fences) {
+				kfree(*fences);
+				*fences = NULL;
+				*num_fences = 0;
 				dma_resv_iter_end(&cursor);
 				return -ENOMEM;
 			}
+			*fences = new_fences;
 		}
 
 		(*fences)[(*num_fences)++] = dma_fence_get(fence);
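
The fix is the classic realloc pattern: the krealloc_array() result must land in a temporary, because writing a NULL result straight back into *fences loses the only reference to the old array. Since dma_resv_get_fences() then returns -ENOMEM, the hunk also frees the old array and clears the caller-visible state. The core pattern in isolation (hypothetical grow_ptr_array() helper, kernel-style):

#include <linux/slab.h>

static int grow_ptr_array(void ***arr, size_t new_count)
{
	void **tmp = krealloc_array(*arr, new_count, sizeof(void *), GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *arr is untouched and still owned by the caller */

	*arr = tmp;
	return 0;
}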

View File

@@ -1709,7 +1709,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 			alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
 		}
-		xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id;
+		xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
+					0 : fpriv->xcp_id;
 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_flags = 0;

View File

@@ -1229,13 +1229,13 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		pasid = 0;
 	}
 
-	r = amdgpu_vm_init(adev, &fpriv->vm);
+	r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
 	if (r)
 		goto error_pasid;
 
-	r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
+	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
 	if (r)
-		goto error_vm;
+		goto error_pasid;
 
 	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
 	if (r)

View File

@@ -1382,7 +1382,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
 		goto error_pasid;
 	}
 
-	r = amdgpu_vm_init(adev, vm);
+	r = amdgpu_vm_init(adev, vm, -1);
 	if (r) {
 		DRM_ERROR("failed to initialize vm\n");
 		goto error_pasid;

View File

@@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
 		DRM_WARN("%s: vblank timer overrun\n", __func__);
 
 	ret = drm_crtc_handle_vblank(crtc);
+	/* Don't queue timer again when vblank is disabled. */
 	if (!ret)
-		DRM_ERROR("amdgpu_vkms failure on handling vblank");
+		return HRTIMER_NORESTART;
 
 	return HRTIMER_RESTART;
 }
@@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-	hrtimer_cancel(&amdgpu_crtc->vblank_timer);
+	hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
 }
 
 static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
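
The switch to hrtimer_try_to_cancel() is the point of the "relax timer deactivation" fix: hrtimer_cancel() blocks until a concurrently running callback returns, so it must not be used on paths the callback itself can reach, while hrtimer_try_to_cancel() gives up immediately. Not waiting is safe here because the callback now returns HRTIMER_NORESTART once vblank handling is disabled, so it cannot re-arm itself. A sketch of the teardown side under that assumption:

#include <linux/hrtimer.h>

static void vblank_timer_disable(struct hrtimer *timer)
{
	/* Returns <0 if the callback is executing right now; that is fine,
	 * since the callback stops requeueing itself once vblank is off.
	 */
	(void)hrtimer_try_to_cancel(timer);
}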

View File

@@ -2121,13 +2121,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @xcp_id: GPU partition selection id
  *
  * Init @vm fields.
  *
  * Returns:
  * 0 for success, error for failure.
  */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
 {
 	struct amdgpu_bo *root_bo;
 	struct amdgpu_bo_vm *root;
@@ -2177,7 +2178,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	vm->evicting = false;
 
 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
-				false, &root);
+				false, &root, xcp_id);
 	if (r)
 		goto error_free_delayed;
 	root_bo = &root->bo;

View File

@@ -392,7 +392,7 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			u32 pasid);
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
@@ -475,7 +475,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			struct amdgpu_bo_vm *vmbo, bool immediate);
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			int level, bool immediate, struct amdgpu_bo_vm **vmbo);
+			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+			int32_t xcp_id);
 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm);

View File

@@ -498,11 +498,12 @@ exit:
  * @level: the page table level
  * @immediate: use a immediate update
  * @vmbo: pointer to the buffer object pointer
+ * @xcp_id: GPU partition id
  */
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			int level, bool immediate, struct amdgpu_bo_vm **vmbo)
+			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+			int32_t xcp_id)
 {
-	struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm);
 	struct amdgpu_bo_param bp;
 	struct amdgpu_bo *bo;
 	struct dma_resv *resv;
@@ -535,7 +536,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	bp.type = ttm_bo_type_kernel;
 	bp.no_wait_gpu = immediate;
-	bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+	bp.xcp_id_plus1 = xcp_id + 1;
 
 	if (vm->root.bo)
 		bp.resv = vm->root.bo->tbo.base.resv;
@@ -561,7 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		bp.type = ttm_bo_type_kernel;
 		bp.resv = bo->tbo.base.resv;
 		bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-		bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+		bp.xcp_id_plus1 = xcp_id + 1;
 
 		r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
@@ -606,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
 		return 0;
 
 	amdgpu_vm_eviction_unlock(vm);
-	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
+				vm->root.bo->xcp_id);
 	amdgpu_vm_eviction_lock(vm);
 	if (r)
 		return r;
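
Passing xcp_id down explicitly also explains why both bp.xcp_id_plus1 ternaries could collapse to a plain xcp_id + 1: with a signed 32-bit id, AMDGPU_XCP_NO_PARTITION (~0, i.e. -1) plus one is exactly the 0 that the old "== ~0 ? 0 : id + 1" expression produced. The arithmetic in a standalone sketch:

#include <assert.h>
#include <stdint.h>

#define NO_PARTITION (~0)	/* mirrors AMDGPU_XCP_NO_PARTITION */

int main(void)
{
	int32_t xcp_id = NO_PARTITION;	/* stored as -1 */

	assert(xcp_id + 1 == 0);	/* the "plus1" encoding for no partition */

	xcp_id = 2;			/* a real partition id */
	assert(xcp_id + 1 == 3);

	return 0;
}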

View File

@@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
 	if (!adev->xcp_mgr)
 		return 0;
 
-	fpriv->xcp_id = ~0;
+	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
 	for (i = 0; i < MAX_XCP; ++i) {
 		if (!adev->xcp_mgr->xcp[i].ddev)
 			break;
@@ -381,7 +381,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
 		}
 	}
 
-	fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
+	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
 				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
 	return 0;
 }

View File

@@ -37,6 +37,8 @@
 #define AMDGPU_XCP_FL_NONE 0
 #define AMDGPU_XCP_FL_LOCKED (1 << 0)
 
+#define AMDGPU_XCP_NO_PARTITION (~0)
+
 struct amdgpu_fpriv;
 
 enum AMDGPU_XCP_IP_BLOCK {

View File

@@ -68,7 +68,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
 	enum AMDGPU_XCP_IP_BLOCK ip_blk;
 	uint32_t inst_mask;
 
-	ring->xcp_id = ~0;
+	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
 	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
 		return;
 
@@ -177,7 +177,7 @@ static int aqua_vanjaram_select_scheds(
 	u32 sel_xcp_id;
 	int i;
 
-	if (fpriv->xcp_id == ~0) {
+	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
 		u32 least_ref_cnt = ~0;
 
 		fpriv->xcp_id = 0;

View File

@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
 
 /* For large FW files the time to complete can be very long */
 #define USBC_PD_POLLING_LIMIT_S 240

View File

@@ -424,12 +424,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
-	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
-		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
 			     amdgpu_crtc->pflip_status,
 			     AMDGPU_FLIP_SUBMITTED,
 			     amdgpu_crtc->crtc_id,
 			     amdgpu_crtc);
 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return;
 	}
@@ -883,7 +883,7 @@ static int dm_set_powergating_state(void *handle,
 }
 
 /* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
 
 /* Allocate memory for FBC compressed data */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1282,7 +1282,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
 
-	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
@@ -1347,6 +1347,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 	if (amdgpu_in_reset(adev))
 		goto skip;
 
+	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+	    offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+		goto skip;
+	}
+
 	mutex_lock(&adev->dm.dc_lock);
 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
 		dc_link_dp_handle_automated_test(dc_link);
@@ -1365,8 +1374,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 					DP_TEST_RESPONSE,
 					&test_response.raw,
 					sizeof(test_response));
-	}
-	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
 			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
 		/* offload_work->data is from handle_hpd_rx_irq->
@@ -1554,7 +1562,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	mutex_init(&adev->dm.dc_lock);
 	mutex_init(&adev->dm.audio_lock);
 
-	if(amdgpu_dm_irq_init(adev)) {
+	if (amdgpu_dm_irq_init(adev)) {
 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
 		goto error;
 	}
@@ -1696,9 +1704,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
 		adev->dm.dc->debug.disable_stutter = true;
 
-	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
 		adev->dm.dc->debug.disable_dsc = true;
-	}
 
 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
 		adev->dm.dc->debug.disable_clock_gate = true;
@@ -1942,8 +1949,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 	mutex_destroy(&adev->dm.audio_lock);
 	mutex_destroy(&adev->dm.dc_lock);
 	mutex_destroy(&adev->dm.dpia_aux_lock);
-
-	return;
 }
 
 static int load_dmcu_fw(struct amdgpu_device *adev)
@@ -1952,7 +1957,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 	int r;
 	const struct dmcu_firmware_header_v1_0 *hdr;
 
-	switch(adev->asic_type) {
+	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
@@ -2709,7 +2714,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
 		struct dc_stream_update stream_update;
-	} * bundle;
+	} *bundle;
 	int k, m;
 
 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@@ -2739,8 +2744,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
 
 cleanup:
 	kfree(bundle);
-
-	return;
 }
 
 static int dm_resume(void *handle)
@@ -2954,8 +2957,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
 	.set_powergating_state = dm_set_powergating_state,
 };
 
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_DCE,
 	.major = 1,
 	.minor = 0,
@@ -3000,9 +3002,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
 	caps->aux_support = false;
 
-	if (caps->ext_caps->bits.oled == 1 /*||
-	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+	if (caps->ext_caps->bits.oled == 1
+	    /*
+	     * ||
+	     * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+	     * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+	     */)
 		caps->aux_support = true;
 
 	if (amdgpu_backlight == 0)
@@ -3236,86 +3241,6 @@ static void handle_hpd_irq(void *param)
 
 }
 
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
-	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-	u8 dret;
-	bool new_irq_handled = false;
-	int dpcd_addr;
-	int dpcd_bytes_to_read;
-
-	const int max_process_count = 30;
-	int process_count = 0;
-
-	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
-	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
-		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
-		/* DPCD 0x200 - 0x201 for downstream IRQ */
-		dpcd_addr = DP_SINK_COUNT;
-	} else {
-		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
-		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
-		dpcd_addr = DP_SINK_COUNT_ESI;
-	}
-
-	dret = drm_dp_dpcd_read(
-		&aconnector->dm_dp_aux.aux,
-		dpcd_addr,
-		esi,
-		dpcd_bytes_to_read);
-
-	while (dret == dpcd_bytes_to_read &&
-		process_count < max_process_count) {
-		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
-		u8 retry;
-
-		dret = 0;
-		process_count++;
-
-		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
-		/* handle HPD short pulse irq */
-		if (aconnector->mst_mgr.mst_state)
-			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
-							esi,
-							ack,
-							&new_irq_handled);
-
-		if (new_irq_handled) {
-			/* ACK at DPCD to notify down stream */
-			for (retry = 0; retry < 3; retry++) {
-				ssize_t wret;
-
-				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
-							  dpcd_addr + 1,
-							  ack[1]);
-				if (wret == 1)
-					break;
-			}
-
-			if (retry == 3) {
-				DRM_ERROR("Failed to ack MST event.\n");
-				return;
-			}
-
-			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
-			/* check if there is new irq to be handled */
-			dret = drm_dp_dpcd_read(
-				&aconnector->dm_dp_aux.aux,
-				dpcd_addr,
-				esi,
-				dpcd_bytes_to_read);
-
-			new_irq_handled = false;
-		} else {
-			break;
-		}
-	}
-
-	if (process_count == max_process_count)
-		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
 							union hpd_irq_data hpd_irq_data)
 {
@@ -3377,7 +3302,23 @@ static void handle_hpd_rx_irq(void *param)
 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-			dm_handle_mst_sideband_msg(aconnector);
+			bool skip = false;
+
+			/*
+			 * DOWN_REP_MSG_RDY is also handled by polling method
+			 * mgr->cbs->poll_hpd_irq()
+			 */
+			spin_lock(&offload_wq->offload_lock);
+			skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+			if (!skip)
+				offload_wq->is_handling_mst_msg_rdy_event = true;
+
+			spin_unlock(&offload_wq->offload_lock);
+
+			if (!skip)
+				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
 			goto out;
 		}
@@ -3468,7 +3409,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 		aconnector = to_amdgpu_dm_connector(connector);
 		dc_link = aconnector->dc_link;
 
-		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 			int_params.irq_source = dc_link->irq_source_hpd;
@@ -3477,7 +3418,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 					(void *) aconnector);
 		}
 
-		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
 
 			/* Also register for DP short pulse (hpd_rx). */
 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@@ -3486,11 +3427,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
 					handle_hpd_rx_irq,
 					(void *) aconnector);
-		}
 
-		if (adev->dm.hpd_rx_offload_wq)
-			adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
-				aconnector;
+			if (adev->dm.hpd_rx_offload_wq)
+				adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
+					aconnector;
+		}
 	}
 }
@@ -3503,7 +3444,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 	struct dc_interrupt_params int_params = {0};
 	int r;
 	int i;
-	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3517,11 +3458,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 	 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 	 * coming from DC hardware.
 	 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-	 * for acknowledging and handling. */
+	 * for acknowledging and handling.
+	 */
 
 	/* Use VBLANK interrupt */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
 		if (r) {
 			DRM_ERROR("Failed to add crtc irq id!\n");
 			return r;
@@ -3529,7 +3471,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
 
 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 		int_params.irq_source =
-			dc_interrupt_to_irq_source(dc, i+1 , 0);
+			dc_interrupt_to_irq_source(dc, i + 1, 0);
 
 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
@@ -3585,7 +3527,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 	struct dc_interrupt_params int_params = {0};
 	int r;
 	int i;
-	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
 
 	if (adev->family >= AMDGPU_FAMILY_AI)
 		client_id = SOC15_IH_CLIENTID_DCE;
@@ -3602,7 +3544,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 	 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 	 * coming from DC hardware.
 	 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-	 * for acknowledging and handling. */
+	 * for acknowledging and handling.
+	 */
 
 	/* Use VBLANK interrupt */
 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@@ -4049,7 +3992,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
 }
 
 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
-				unsigned *min, unsigned *max)
+				unsigned int *min, unsigned int *max)
 {
 	if (!caps)
 		return 0;
@@ -4069,7 +4012,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
 					uint32_t brightness)
 {
-	unsigned min, max;
+	unsigned int min, max;
 
 	if (!get_brightness_range(caps, &min, &max))
 		return brightness;
@@ -4082,7 +4025,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *c
 					uint32_t brightness)
 {
-	unsigned min, max;
+	unsigned int min, max;
 
 	if (!get_brightness_range(caps, &min, &max))
 		return brightness;
@@ -4562,7 +4505,6 @@ fail:
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
 	drm_atomic_private_obj_fini(&dm->atomic_obj);
-	return;
 }
 
 /******************************************************************************
@@ -5394,6 +5336,7 @@ static bool adjust_colour_depth_from_display_info(
 {
 	enum dc_color_depth depth = timing_out->display_color_depth;
 	int normalized_clk;
+
 	do {
 		normalized_clk = timing_out->pix_clk_100hz / 10;
 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@@ -5609,6 +5552,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
 {
 	struct dc_sink_init_data sink_init_data = { 0 };
 	struct dc_sink *sink = NULL;
+
 	sink_init_data.link = aconnector->dc_link;
 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
@@ -5732,7 +5676,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
 		return &aconnector->freesync_vid_base;
 
 	/* Find the preferred mode */
-	list_for_each_entry (m, list_head, head) {
+	list_for_each_entry(m, list_head, head) {
 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
 			m_pref = m;
 			break;
@@ -5756,7 +5700,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
 	 * For some monitors, preferred mode is not the mode with highest
 	 * supported refresh rate.
 	 */
-	list_for_each_entry (m, list_head, head) {
+	list_for_each_entry(m, list_head, head) {
 		current_refresh = drm_mode_vrefresh(m);
 
 		if (m->hdisplay == m_pref->hdisplay &&
@@ -6028,7 +5972,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 * This may not be an error, the use case is when we have no
 		 * usermode calls to reset and set mode upon hotplug. In this
 		 * case, we call set mode ourselves to restore the previous mode
-		 * and the modelist may not be filled in in time.
+		 * and the modelist may not be filled in time.
 		 */
 		DRM_DEBUG_DRIVER("No preferred mode found\n");
 	} else {
@@ -6051,9 +5995,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		drm_mode_set_crtcinfo(&mode, 0);
 
 		/*
-		* If scaling is enabled and refresh rate didn't change
-		* we copy the vic and polarities of the old timings
-		*/
+		 * If scaling is enabled and refresh rate didn't change
+		 * we copy the vic and polarities of the old timings
+		 */
 		if (!scale || mode_refresh != preferred_refresh)
 			fill_stream_properties_from_drm_display_mode(
 				stream, &mode, &aconnector->base, con_state, NULL,
@@ -6817,6 +6761,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 	if (!state->duplicated) {
 		int max_bpc = conn_state->max_requested_bpc;
+
 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
 			  aconnector->force_yuv420_output;
 		color_depth = convert_color_depth_from_display_info(connector,
@@ -7135,7 +7080,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
 {
 	struct drm_display_mode *m;
 
-	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
 		if (drm_mode_equal(m, mode))
 			return true;
 	}
@@ -7295,6 +7240,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
 	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
 	mutex_init(&aconnector->hpd_lock);
+	mutex_init(&aconnector->handle_mst_msg_ready);
 
 	/*
 	 * configure support HPD hot plug connector_>polled default value is 0
@@ -7454,7 +7400,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 
 	link->priv = aconnector;
 
-	DRM_DEBUG_DRIVER("%s()\n", __func__);
 
 	i2c = create_i2c(link->ddc, link->link_index, &res);
 	if (!i2c) {
@@ -8125,7 +8070,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * Only allow immediate flips for fast updates that don't
 		 * change memory domain, FB pitch, DCC state, rotation or
 		 * mirroring.
+		 *
+		 * dm_crtc_helper_atomic_check() only accepts async flips with
+		 * fast updates.
 		 */
+		if (crtc->state->async_flip &&
+		    acrtc_state->update_type != UPDATE_TYPE_FAST)
+			drm_warn_once(state->dev,
+				      "[PLANE:%d:%s] async flip with non-fast update\n",
+				      plane->base.id, plane->name);
 		bundle->flip_addrs[planes_count].flip_immediate =
 			crtc->state->async_flip &&
 			acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8168,8 +8121,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			 * DRI3/Present extension with defined target_msc.
 			 */
 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
-		}
-		else {
+		} else {
 			/* For variable refresh rate mode only:
 			 * Get vblank of last completed flip to avoid > 1 vrr
 			 * flips per video frame by use of throttling, but allow
@@ -8502,8 +8454,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
 	}
 
-	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
-				       new_crtc_state, i) {
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+				      new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 
 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -8526,9 +8478,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
 		drm_dbg_state(state->dev,
-			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
-			"connectors_changed:%d\n",
+			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
 			acrtc->crtc_id,
 			new_crtc_state->enable,
 			new_crtc_state->active,
@@ -9104,8 +9054,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
 					&commit->flip_done, 10*HZ);
 
 		if (ret == 0)
-			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
-				  "timed out\n", crtc->base.id, crtc->name);
+			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+				  crtc->base.id, crtc->name);
 
 		drm_crtc_commit_put(commit);
 	}
@@ -9190,7 +9140,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 	return false;
 }
 
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
 	u64 num, den, res;
 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
@@ -9312,9 +9263,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 		goto skip_modeset;
 
 	drm_dbg_state(state->dev,
-		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
-		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
-		"connectors_changed:%d\n",
+		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
 		acrtc->crtc_id,
 		new_crtc_state->enable,
 		new_crtc_state->active,
@@ -9343,8 +9292,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 				     old_crtc_state)) {
 			new_crtc_state->mode_changed = false;
 			DRM_DEBUG_DRIVER(
-				"Mode change not required for front porch change, "
-				"setting mode_changed to %d",
+				"Mode change not required for front porch change, setting mode_changed to %d",
 				new_crtc_state->mode_changed);
 
 			set_freesync_fixed_config(dm_new_crtc_state);
@@ -9356,9 +9304,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 			struct drm_display_mode *high_mode;
 
 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
-			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
 				set_freesync_fixed_config(dm_new_crtc_state);
-			}
 		}
 
 		ret = dm_atomic_get_state(state, &dm_state);
@@ -9526,6 +9473,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
 	 */
 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
 		struct amdgpu_framebuffer *old_afb, *new_afb;
+
 		if (other->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
@@ -9624,11 +9572,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
 	}
 
 	/* Core DRM takes care of checking FB modifiers, so we only need to
-	 * check tiling flags when the FB doesn't have a modifier. */
+	 * check tiling flags when the FB doesn't have a modifier.
+	 */
 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
 		if (adev->family < AMDGPU_FAMILY_AI) {
 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
 				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
 		} else {
 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@@ -9850,12 +9799,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
 	 * cursor per pipe but it's going to inherit the scaling and
 	 * positioning from the underlying pipe. Check the cursor plane's
-	 * blending properties match the underlying planes'. */
+	 * blending properties match the underlying planes'.
+	 */
 
 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
-	if (!new_cursor_state || !new_cursor_state->fb) {
+	if (!new_cursor_state || !new_cursor_state->fb)
 		return 0;
-	}
 
 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@@ -9900,6 +9849,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
 	struct drm_connector_state *conn_state, *old_conn_state;
 	struct amdgpu_dm_connector *aconnector = NULL;
 	int i;
+
 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
 		if (!conn_state->crtc)
 			conn_state = old_conn_state;
@@ -10334,7 +10284,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	}
 
 	/* Store the overall update type for use later in atomic check. */
-	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 		struct dm_crtc_state *dm_new_crtc_state =
 			to_dm_crtc_state(new_crtc_state);
@@ -10356,7 +10306,7 @@ fail:
 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
 	else
-		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
 
 	trace_amdgpu_dm_atomic_check_finish(state, ret);
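
The MST hunks above hinge on a small test-and-set gate: handle_hpd_rx_irq() claims is_handling_mst_msg_rdy_event under the offload queue's spinlock before scheduling the offload work, and dm_handle_hpd_rx_offload_work() clears the flag once the event is drained, so at most one "message ready" handler runs per queue. The gate in isolation (hypothetical try_claim() helper):

#include <linux/spinlock.h>

static bool try_claim(spinlock_t *lock, bool *busy)
{
	bool claimed = false;

	spin_lock(lock);
	if (!*busy) {
		*busy = true;	/* caller owns the event; the worker clears it */
		claimed = true;
	}
	spin_unlock(lock);

	return claimed;
}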

View File

@@ -194,6 +194,11 @@ struct hpd_rx_irq_offload_work_queue {
 	 * we're handling link loss
 	 */
 	bool is_handling_link_loss;
+	/**
+	 * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+	 * ready event when we're already handling mst message ready event
+	 */
+	bool is_handling_mst_msg_rdy_event;
 	/**
 	 * @aconnector: The aconnector that this work queue is attached to
 	 */

@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
 	struct drm_dp_mst_port *mst_output_port;
 	struct amdgpu_dm_connector *mst_root;
 	struct drm_dp_aux *dsc_aux;
+	struct mutex handle_mst_msg_ready;
+
 	/* TODO see if we can merge with ddc_bus or make a dm_connector */
 	struct amdgpu_i2c_adapter *i2c;

View File

@@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
+	/*
+	 * Only allow async flips for fast updates that don't change the FB
+	 * pitch, the DCC state, rotation, etc.
+	 */
+	if (crtc_state->async_flip &&
+	    dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+		drm_dbg_atomic(crtc->dev,
+			       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+			       crtc->base.id, crtc->name);
+		return -EINVAL;
+	}
+
 	/* In some use cases, like reset, no stream is attached */
 	if (!dm_crtc_state->stream)
 		return 0;
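
Seen from userspace, this check means an atomic commit carrying DRM_MODE_PAGE_FLIP_ASYNC is rejected with -EINVAL at check time whenever it implies more than a fast update (new pitch, modifier, rotation, and so on) instead of silently being applied as a full update. A libdrm sketch of the flip that remains valid (the property id is an assumption; look it up via drmModeObjectGetProperties()):

#include <xf86drm.h>
#include <xf86drmMode.h>

int async_fb_swap(int fd, drmModeAtomicReq *req, uint32_t plane_id,
		  uint32_t fb_id_prop, uint32_t new_fb_id)
{
	/* Only FB_ID changes: a fast update, so the async flag is honored. */
	drmModeAtomicAddProperty(req, plane_id, fb_id_prop, new_fb_id);

	return drmModeAtomicCommit(fd, req,
				   DRM_MODE_PAGE_FLIP_ASYNC |
				   DRM_MODE_ATOMIC_NONBLOCK, NULL);
}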

View File

@@ -619,8 +619,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	return connector;
 }
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type)
+{
+	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+	uint8_t dret;
+	bool new_irq_handled = false;
+	int dpcd_addr;
+	uint8_t dpcd_bytes_to_read;
+	const uint8_t max_process_count = 30;
+	uint8_t process_count = 0;
+	u8 retry;
+	struct amdgpu_dm_connector *aconnector =
+			container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+		/* DPCD 0x200 - 0x201 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT;
+	} else {
+		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT_ESI;
+	}
+
+	mutex_lock(&aconnector->handle_mst_msg_ready);
+
+	while (process_count < max_process_count) {
+		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+		process_count++;
+
+		dret = drm_dp_dpcd_read(
+			&aconnector->dm_dp_aux.aux,
+			dpcd_addr,
+			esi,
+			dpcd_bytes_to_read);
+
+		if (dret != dpcd_bytes_to_read) {
+			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+			break;
+		}
+
+		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+		switch (msg_rdy_type) {
+		case DOWN_REP_MSG_RDY_EVENT:
+			/* Only handle DOWN_REP_MSG_RDY case*/
+			esi[1] &= DP_DOWN_REP_MSG_RDY;
+			break;
+		case UP_REQ_MSG_RDY_EVENT:
+			/* Only handle UP_REQ_MSG_RDY case*/
+			esi[1] &= DP_UP_REQ_MSG_RDY;
+			break;
+		default:
+			/* Handle both cases*/
+			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+			break;
+		}
+
+		if (!esi[1])
+			break;
+
+		/* handle MST irq */
+		if (aconnector->mst_mgr.mst_state)
+			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+							esi,
+							ack,
+							&new_irq_handled);
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify down stream */
+			for (retry = 0; retry < 3; retry++) {
+				ssize_t wret;
+
+				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+							  dpcd_addr + 1,
+							  ack[1]);
+				if (wret == 1)
+					break;
+			}
+
+			if (retry == 3) {
+				DRM_ERROR("Failed to ack MST event.\n");
+				return;
+			}
+
+			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+			new_irq_handled = false;
+		} else {
+			break;
+		}
+	}
+
+	mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+	if (process_count == max_process_count)
+		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
 	.add_connector = dm_dp_add_mst_connector,
+	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,

View File

@@ -49,6 +49,13 @@
 #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
 
+enum mst_msg_ready_type {
+	NONE_MSG_RDY_EVENT = 0,
+	DOWN_REP_MSG_RDY_EVENT = 1,
+	UP_REQ_MSG_RDY_EVENT = 2,
+	DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;

@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type);
+
 struct dsc_mst_fairness_vars {
 	int pbn;
 	bool dsc_enabled;

View File

@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
 			    stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
 			    stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
 				tmds_present = true;
+
+			/* Checking stream / link detection ensuring that PHY is active*/
+			if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+				display_count++;
+
 		}
 
 		for (i = 0; i < dc->link_count; i++) {


@@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(
        if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
            struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);

-           if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+           if (pipe_ctx->stream_res.tg &&
+               pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
                res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
            pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
            hubp->funcs->set_blank(hubp, true);


@@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
    optc1->opp_count = 1;
}

-static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
        struct dc_crtc_timing *timing)
{
    struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
            OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
}

-static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
{
    struct optc *optc1 = DCN10TG_FROM_TG(optc);


@@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
void optc3_set_odm_bypass(struct timing_generator *optc,
        const struct dc_crtc_timing *dc_crtc_timing);

void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
        struct dc_crtc_timing *timing);
void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);

void optc3_tg_init(struct timing_generator *optc);
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);

#endif /* __DC_OPTC_DCN30_H__ */


@@ -11,7 +11,8 @@
# Makefile for dcn30.

DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
-   dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
+   dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \
+   dcn301_optc.o

AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))


@@ -0,0 +1,185 @@
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dcn301_optc.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"
#include "dml/dcn30/dcn30_fpu.h"
#include "dc_trace.h"
#define REG(reg)\
    optc1->tg_regs->reg

#define CTX \
    optc1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
    optc1->tg_shift->field_name, optc1->tg_mask->field_name
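/* REG()/FN() resolve register offsets and field shift/mask pairs through the
 * per-instance tg_regs/tg_shift/tg_mask tables assigned when the timing
 * generator is created (see the dcn301_resource.c hunk further below).
 */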
/**
* optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
*
* @optc: timing_generator instance.
* @params: parameters used for Dynamic Refresh Rate.
*/
void optc301_set_drr(
    struct timing_generator *optc,
    const struct drr_params *params)
{
    struct optc *optc1 = DCN10TG_FROM_TG(optc);

    if (params != NULL &&
        params->vertical_total_max > 0 &&
        params->vertical_total_min > 0) {

        if (params->vertical_total_mid != 0) {
            REG_SET(OTG_V_TOTAL_MID, 0,
                OTG_V_TOTAL_MID, params->vertical_total_mid - 1);

            REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
                OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
                OTG_VTOTAL_MID_FRAME_NUM,
                (uint8_t)params->vertical_total_mid_frame_num);
        }
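
        /* Register values are zero-based, hence the -1 on each bound below. */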
        optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);

        REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
            OTG_V_TOTAL_MIN_SEL, 1,
            OTG_V_TOTAL_MAX_SEL, 1,
            OTG_FORCE_LOCK_ON_EVENT, 0,
            OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
            OTG_SET_V_TOTAL_MIN_MASK, 0);

        // Setup manual flow control for EOF via TRIG_A
        optc->funcs->setup_manual_trigger(optc);

    } else {
        REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
            OTG_SET_V_TOTAL_MIN_MASK, 0,
            OTG_V_TOTAL_MIN_SEL, 0,
            OTG_V_TOTAL_MAX_SEL, 0,
            OTG_FORCE_LOCK_ON_EVENT, 0);

        optc->funcs->set_vtotal_min_max(optc, 0, 0);
    }
}
void optc301_setup_manual_trigger(struct timing_generator *optc)
{
    struct optc *optc1 = DCN10TG_FROM_TG(optc);
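
    /* Arm TRIG_A on this OTG instance: select the trigger source, detect
     * rising edges only, apply no delay, and clear any stale trigger.
     */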
    REG_SET_8(OTG_TRIGA_CNTL, 0,
        OTG_TRIGA_SOURCE_SELECT, 21,
        OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
        OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
        OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
        OTG_TRIGA_POLARITY_SELECT, 0,
        OTG_TRIGA_FREQUENCY_SELECT, 0,
        OTG_TRIGA_DELAY, 0,
        OTG_TRIGA_CLEAR, 1);
}
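
/* DCN3.0.1 reuses the DCN3.0 timing-generator funcs wholesale, overriding
 * only set_drr and setup_manual_trigger with the optc301_* variants above.
 */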
static struct timing_generator_funcs dcn30_tg_funcs = {
    .validate_timing = optc1_validate_timing,
    .program_timing = optc1_program_timing,
    .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
    .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
    .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
    .program_global_sync = optc1_program_global_sync,
    .enable_crtc = optc2_enable_crtc,
    .disable_crtc = optc1_disable_crtc,
    /* used by enable_timing_synchronization. Not need for FPGA */
    .is_counter_moving = optc1_is_counter_moving,
    .get_position = optc1_get_position,
    .get_frame_count = optc1_get_vblank_counter,
    .get_scanoutpos = optc1_get_crtc_scanoutpos,
    .get_otg_active_size = optc1_get_otg_active_size,
    .set_early_control = optc1_set_early_control,
    /* used by enable_timing_synchronization. Not need for FPGA */
    .wait_for_state = optc1_wait_for_state,
    .set_blank_color = optc3_program_blank_color,
    .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
    .triplebuffer_lock = optc3_triplebuffer_lock,
    .triplebuffer_unlock = optc2_triplebuffer_unlock,
    .enable_reset_trigger = optc1_enable_reset_trigger,
    .enable_crtc_reset = optc1_enable_crtc_reset,
    .disable_reset_trigger = optc1_disable_reset_trigger,
    .lock = optc3_lock,
    .unlock = optc1_unlock,
    .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
    .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
    .enable_optc_clock = optc1_enable_optc_clock,
    .set_drr = optc301_set_drr,
    .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
    .set_vtotal_min_max = optc3_set_vtotal_min_max,
    .set_static_screen_control = optc1_set_static_screen_control,
    .program_stereo = optc1_program_stereo,
    .is_stereo_left_eye = optc1_is_stereo_left_eye,
    .tg_init = optc3_tg_init,
    .is_tg_enabled = optc1_is_tg_enabled,
    .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
    .clear_optc_underflow = optc1_clear_optc_underflow,
    .setup_global_swap_lock = NULL,
    .get_crc = optc1_get_crc,
    .configure_crc = optc2_configure_crc,
    .set_dsc_config = optc3_set_dsc_config,
    .get_dsc_status = optc2_get_dsc_status,
    .set_dwb_source = NULL,
    .set_odm_bypass = optc3_set_odm_bypass,
    .set_odm_combine = optc3_set_odm_combine,
    .get_optc_source = optc2_get_optc_source,
    .set_out_mux = optc3_set_out_mux,
    .set_drr_trigger_window = optc3_set_drr_trigger_window,
    .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
    .set_gsl = optc2_set_gsl,
    .set_gsl_source_select = optc2_set_gsl_source_select,
    .set_vtg_params = optc1_set_vtg_params,
    .program_manual_trigger = optc2_program_manual_trigger,
    .setup_manual_trigger = optc301_setup_manual_trigger,
    .get_hw_timing = optc1_get_hw_timing,
    .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
};
void dcn301_timing_generator_init(struct optc *optc1)
{
    optc1->base.funcs = &dcn30_tg_funcs;

    optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
    optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;

    optc1->min_h_blank = 32;
    optc1->min_v_blank = 3;
    optc1->min_v_blank_interlace = 5;
    optc1->min_h_sync_width = 4;
    optc1->min_v_sync_width = 1;
}


@@ -0,0 +1,36 @@
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_OPTC_DCN301_H__
#define __DC_OPTC_DCN301_H__
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_optc.h"
void dcn301_timing_generator_init(struct optc *optc1);
void optc301_setup_manual_trigger(struct timing_generator *optc);
void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params);
#endif /* __DC_OPTC_DCN301_H__ */


@@ -42,7 +42,7 @@
#include "dcn30/dcn30_hubp.h"
#include "irq/dcn30/irq_service_dcn30.h"
#include "dcn30/dcn30_dpp.h"
-#include "dcn30/dcn30_optc.h"
+#include "dcn301/dcn301_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
@@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create(
    tgn10->tg_shift = &optc_shift;
    tgn10->tg_mask = &optc_mask;

-   dcn30_timing_generator_init(tgn10);
+   dcn301_timing_generator_init(tgn10);

    return &tgn10->base;
}


@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
    .timing_trace = false,
    .clock_trace = true,
    .disable_pplib_clock_request = true,
-   .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+   .pipe_split_policy = MPC_SPLIT_AVOID,
    .force_single_disp_pipe_split = false,
    .disable_dcc = DCC_ENABLE,
    .vsr_support = true,


@@ -295,7 +295,11 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
        pipe = &res_ctx->pipe_ctx[i];
        timing = &pipe->stream->timing;

-       pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+       if (pipe->stream->adjust.v_total_min != 0)
+           pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+       else
+           pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+
        pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
        pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
        pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);


@@ -1798,17 +1798,6 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
    return result;
}

-static bool intel_core_rkl_chk(void)
-{
-#if IS_ENABLED(CONFIG_X86_64)
-   struct cpuinfo_x86 *c = &cpu_data(0);
-
-   return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
-#else
-   return false;
-#endif
-}
-
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1835,7 +1824,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
    data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
    data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
    data->pcie_dpm_key_disabled =
-       intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+       !amdgpu_device_pcie_dynamic_switching_supported() ||
+       !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);

    /* need to set voltage control types before EVV patching */
    data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
    data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;


@@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
        *size = 4;
        break;
    case AMDGPU_PP_SENSOR_GFX_MCLK:
-       ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
+       ret = sienna_cichlid_get_smu_metrics_data(smu,
+                             METRICS_CURR_UCLK,
+                             (uint32_t *)data);
        *(uint32_t *)data *= 100;
        *size = 4;
        break;
    case AMDGPU_PP_SENSOR_GFX_SCLK:
-       ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+       ret = sienna_cichlid_get_smu_metrics_data(smu,
+                             METRICS_AVERAGE_GFXCLK,
+                             (uint32_t *)data);
        *(uint32_t *)data *= 100;
        *size = 4;
        break;


@@ -949,7 +949,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
        break;
    case AMDGPU_PP_SENSOR_GFX_MCLK:
        ret = smu_v13_0_7_get_smu_metrics_data(smu,
-                              METRICS_AVERAGE_UCLK,
+                              METRICS_CURR_UCLK,
                               (uint32_t *)data);
        *(uint32_t *)data *= 100;
        *size = 4;


@@ -311,6 +311,9 @@ static bool drm_client_target_cloned(struct drm_device *dev,
    can_clone = true;
    dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);

    if (!dmt_mode)
        goto fail;

    for (i = 0; i < connector_count; i++) {
        if (!enabled[i])
            continue;
@@ -326,11 +329,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
        if (!modes[i])
            can_clone = false;
    }
    kfree(dmt_mode);

    if (can_clone) {
        DRM_DEBUG_KMS("can clone using 1024x768\n");
        return true;
    }

fail:
    DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
    return false;
}
@@ -862,6 +867,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
            break;
        }

        kfree(modeset->mode);
        modeset->mode = drm_mode_duplicate(dev, mode);
        drm_connector_get(connector);
        modeset->connectors[modeset->num_connectors++] = connector;


@@ -23,6 +23,11 @@ subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror

# Fine grained warnings disable
CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)

subdir-ccflags-y += -I$(srctree)/$(src)

# Please keep these build lists sorted!


@@ -16,9 +16,6 @@
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"

-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
-
static const struct intel_display_device_info no_display = {};

#define PIPE_A_OFFSET 0x70000
@@ -665,8 +662,6 @@ static const struct intel_display_device_info xe_lpdp_display = {
        BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
};

-__diag_pop();
-
#undef INTEL_VGA_DEVICE
#undef INTEL_QUANTA_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) { id, info }


@@ -135,9 +135,6 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
    return i915_gem_fb_mmap(obj, vma);
}

-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding the default ops");
-
static const struct fb_ops intelfb_ops = {
    .owner = THIS_MODULE,
    __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
@@ -149,8 +146,6 @@ static const struct fb_ops intelfb_ops = {
    .fb_mmap = intel_fbdev_mmap,
};

-__diag_pop();
-
static int intelfb_alloc(struct drm_fb_helper *helper,
             struct drm_fb_helper_surface_size *sizes)
{


@@ -38,9 +38,6 @@
#include "i915_reg.h"
#include "intel_pci_config.h"

-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
-
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
    .__runtime.graphics.ip.ver = (x), \
@@ -846,8 +843,6 @@ static const struct intel_device_info mtl_info = {
#undef PLATFORM

-__diag_pop();
-
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem


@@ -4431,6 +4431,7 @@ static const struct i915_range mtl_oam_b_counters[] = {
static const struct i915_range xehp_oa_b_counters[] = {
    { .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
    { .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
    {} /* sentinel: terminates the range-table walk */
};

static const struct i915_range gen7_oa_mux_regs[] = {


@@ -1877,6 +1877,8 @@ nv50_pior_destroy(struct drm_encoder *encoder)
    nvif_outp_dtor(&nv_encoder->outp);

    drm_encoder_cleanup(encoder);

    mutex_destroy(&nv_encoder->dp.hpd_irq_lock);

    kfree(encoder);
}
@@ -1921,6 +1923,8 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
    nv_encoder->i2c = ddc;
    nv_encoder->aux = aux;

    mutex_init(&nv_encoder->dp.hpd_irq_lock);

    encoder = to_drm_encoder(nv_encoder);
    encoder->possible_crtcs = dcbe->heads;
    encoder->possible_clones = 0;


@@ -16,7 +16,7 @@ struct nvkm_i2c_bus {
    const struct nvkm_i2c_bus_func *func;
    struct nvkm_i2c_pad *pad;
#define NVKM_I2C_BUS_CCB(n) /* 'n' is ccb index */ (n)
-#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x10)
#define NVKM_I2C_BUS_PRI /* ccb primary comm. port */ -1
#define NVKM_I2C_BUS_SEC /* ccb secondary comm. port */ -2
    int id;
@@ -38,7 +38,7 @@ struct nvkm_i2c_aux {
    const struct nvkm_i2c_aux_func *func;
    struct nvkm_i2c_pad *pad;
#define NVKM_I2C_AUX_CCB(n) /* 'n' is ccb index */ (n)
-#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x10)
    int id;

    struct mutex mutex;


@@ -81,20 +81,29 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
        return -ENOSYS;

    list_for_each_entry(outp, &conn->disp->outps, head) {
-       if (outp->info.connector == conn->index && outp->dp.aux) {
-           if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG  ) bits |= NVKM_I2C_PLUG;
-           if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
-           if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ   ) bits |= NVKM_I2C_IRQ;
-
-           return nvkm_uevent_add(uevent, &device->i2c->event, outp->dp.aux->id, bits,
-                          nvkm_uconn_uevent_aux);
-       }
+       if (outp->info.connector == conn->index)
+           break;
+   }
+
+   if (&outp->head == &conn->disp->outps)
+       return -EINVAL;
+
+   if (outp->dp.aux && !outp->info.location) {
+       if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG  ) bits |= NVKM_I2C_PLUG;
+       if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
+       if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ   ) bits |= NVKM_I2C_IRQ;
+
+       return nvkm_uevent_add(uevent, &device->i2c->event, outp->dp.aux->id, bits,
+                      nvkm_uconn_uevent_aux);
    }

    if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG  ) bits |= NVKM_GPIO_HI;
    if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
-   if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ)
-       return -EINVAL;
+   if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
+       /* TODO: support DP IRQ on ANX9805 and remove this hack. */
+       if (!outp->info.location)
+           return -EINVAL;
+   }

    return nvkm_uevent_add(uevent, &device->gpio->event, conn->info.hpd, bits,
                   nvkm_uconn_uevent_gpio);


@@ -260,10 +260,11 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
{
    struct nvkm_bios *bios = device->bios;
    struct nvkm_i2c *i2c;
+   struct nvkm_i2c_aux *aux;
    struct dcb_i2c_entry ccbE;
    struct dcb_output dcbE;
    u8 ver, hdr;
-   int ret, i;
+   int ret, i, ids;

    if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
        return -ENOMEM;
@@ -406,5 +407,11 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
        }
    }

-   return nvkm_event_init(&nvkm_i2c_intr_func, &i2c->subdev, 4, i, &i2c->event);
+   ids = 0;
+   list_for_each_entry(aux, &i2c->aux, head)
+       ids = max(ids, aux->id + 1);
+   if (!ids)
+       return 0;
+
+   return nvkm_event_init(&nvkm_i2c_intr_func, &i2c->subdev, 4, ids, &i2c->event);
}
} }