drm/xe: Invert mask and val in xe_mmio_wait32
The order 'offset, mask, val' is more common in other drivers, and in particular in i915, where any developer could copy a sequence and end up with unexpected behavior. Done with coccinelle: @rule1@ expression gt, reg, val, mask, timeout, out, atomic; @@ - xe_mmio_wait32(gt, reg, val, mask, timeout, out, atomic) + xe_mmio_wait32(gt, reg, mask, val, timeout, out, atomic) spatch -sp_file mmio.cocci *.c *.h compat-i915-headers/intel_uncore.h \ --in-place v2: Rebased after changes on xe_guc_mcr usage of xe_mmio_wait32. Reviewed-by: Matthew Brost <matthew.brost@intel.com> Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
This commit is contained in:
parent
f83a30f466
commit
063e09af6e
@ -120,7 +120,7 @@ static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
|
||||
static int domain_sleep_wait(struct xe_gt *gt,
|
||||
struct xe_force_wake_domain *domain)
|
||||
{
|
||||
return xe_mmio_wait32(gt, domain->reg_ack, 0, domain->val,
|
||||
return xe_mmio_wait32(gt, domain->reg_ack, domain->val, 0,
|
||||
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
|
||||
NULL, false);
|
||||
}
|
||||
|
@ -456,8 +456,7 @@ static int do_gt_reset(struct xe_gt *gt)
|
||||
int err;
|
||||
|
||||
xe_mmio_write32(gt, GDRST, GRDOM_FULL);
|
||||
err = xe_mmio_wait32(gt, GDRST, 0, GRDOM_FULL, 5000,
|
||||
NULL, false);
|
||||
err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
|
||||
if (err)
|
||||
xe_gt_err(gt, "failed to clear GEN11_GRDOM_FULL (%pe)\n",
|
||||
ERR_PTR(err));
|
||||
|
@ -290,8 +290,7 @@ int xe_guc_reset(struct xe_guc *guc)
|
||||
|
||||
xe_mmio_write32(gt, GDRST, GRDOM_GUC);
|
||||
|
||||
ret = xe_mmio_wait32(gt, GDRST, 0, GRDOM_GUC, 5000,
|
||||
&gdrst, false);
|
||||
ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
|
||||
if (ret) {
|
||||
drm_err(&xe->drm, "GuC reset timed out, GEN6_GDRST=0x%8x\n",
|
||||
gdrst);
|
||||
@ -386,10 +385,9 @@ static int guc_wait_ucode(struct xe_guc *guc)
|
||||
* 200ms. Even at slowest clock, this should be sufficient. And
|
||||
* in the working case, a larger timeout makes no difference.
|
||||
*/
|
||||
ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS,
|
||||
FIELD_PREP(GS_UKERNEL_MASK,
|
||||
XE_GUC_LOAD_STATUS_READY),
|
||||
GS_UKERNEL_MASK, 200000, &status, false);
|
||||
ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
|
||||
FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
|
||||
200000, &status, false);
|
||||
|
||||
if (ret) {
|
||||
struct drm_device *drm = &xe->drm;
|
||||
@ -639,10 +637,9 @@ retry:
|
||||
|
||||
xe_guc_notify(guc);
|
||||
|
||||
ret = xe_mmio_wait32(gt, reply_reg,
|
||||
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
|
||||
GUC_HXG_ORIGIN_GUC),
|
||||
GUC_HXG_MSG_0_ORIGIN, 50000, &reply, false);
|
||||
ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
|
||||
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
|
||||
50000, &reply, false);
|
||||
if (ret) {
|
||||
timeout:
|
||||
drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
|
||||
@ -654,11 +651,9 @@ timeout:
|
||||
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
|
||||
GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
|
||||
|
||||
ret = xe_mmio_wait32(gt, reply_reg,
|
||||
FIELD_PREP(GUC_HXG_MSG_0_TYPE,
|
||||
GUC_HXG_TYPE_RESPONSE_SUCCESS),
|
||||
GUC_HXG_MSG_0_TYPE, 1000000, &header,
|
||||
false);
|
||||
ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_TYPE,
|
||||
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
|
||||
1000000, &header, false);
|
||||
|
||||
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
|
||||
GUC_HXG_ORIGIN_GUC))
|
||||
|
@ -85,8 +85,7 @@ int xe_huc_auth(struct xe_huc *huc)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = xe_mmio_wait32(gt, HUC_KERNEL_LOAD_INFO,
|
||||
HUC_LOAD_SUCCESSFUL,
|
||||
ret = xe_mmio_wait32(gt, HUC_KERNEL_LOAD_INFO, HUC_LOAD_SUCCESSFUL,
|
||||
HUC_LOAD_SUCCESSFUL, 100000, NULL, false);
|
||||
if (ret) {
|
||||
drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
|
||||
|
@ -107,8 +107,8 @@ static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
|
||||
return (reg_val & mask) != eval ? -EINVAL : 0;
|
||||
}
|
||||
|
||||
static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
|
||||
u32 mask, u32 timeout_us, u32 *out_val,
|
||||
static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask,
|
||||
u32 val, u32 timeout_us, u32 *out_val,
|
||||
bool atomic)
|
||||
{
|
||||
ktime_t cur = ktime_get_raw();
|
||||
|
@ -68,7 +68,7 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
|
||||
xe_mmio_write32(gt, PCODE_DATA1, data1 ? *data1 : 0);
|
||||
xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
|
||||
|
||||
err = xe_mmio_wait32(gt, PCODE_MAILBOX, 0, PCODE_READY,
|
||||
err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
|
||||
timeout_ms * 1000, NULL, atomic);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -484,7 +484,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
|
||||
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
|
||||
|
||||
/* Wait for DMA to finish */
|
||||
ret = xe_mmio_wait32(gt, DMA_CTRL, 0, START_DMA, 100000, &dma_ctrl,
|
||||
ret = xe_mmio_wait32(gt, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
|
||||
false);
|
||||
if (ret)
|
||||
drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
|
||||
|
Loading…
x
Reference in New Issue
Block a user