drm/i915: call i915_request_await_object from _i915_vma_move_to_active
Since almost all calls to i915_vma_move_to_active are preceded by a call to i915_request_await_object, let's call the latter from _i915_vma_move_to_active by default and add a flag that allows callers to bypass it. Adjust all callers accordingly. The patch should not introduce functional changes. Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com> Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Reviewed-by: Matthew Auld <matthew.auld@intel.com> Signed-off-by: Matthew Auld <matthew.auld@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20221019215906.295296-2-andrzej.hajda@intel.com
This commit is contained in:
parent
5664561cbb
commit
2a76fc899a
@ -53,13 +53,13 @@ enum {
|
||||
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
|
||||
};
|
||||
|
||||
/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
|
||||
#define __EXEC_OBJECT_HAS_PIN BIT(30)
|
||||
#define __EXEC_OBJECT_HAS_FENCE BIT(29)
|
||||
#define __EXEC_OBJECT_USERPTR_INIT BIT(28)
|
||||
#define __EXEC_OBJECT_NEEDS_MAP BIT(27)
|
||||
#define __EXEC_OBJECT_NEEDS_BIAS BIT(26)
|
||||
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 26) /* all of the above + */
|
||||
/* __EXEC_OBJECT_ flags > BIT(29) defined in i915_vma.h */
|
||||
#define __EXEC_OBJECT_HAS_PIN BIT(29)
|
||||
#define __EXEC_OBJECT_HAS_FENCE BIT(28)
|
||||
#define __EXEC_OBJECT_USERPTR_INIT BIT(27)
|
||||
#define __EXEC_OBJECT_NEEDS_MAP BIT(26)
|
||||
#define __EXEC_OBJECT_NEEDS_BIAS BIT(25)
|
||||
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 25) /* all of the above + */
|
||||
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
|
||||
|
||||
#define __EXEC_HAS_RELOC BIT(31)
|
||||
@ -2101,7 +2101,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
|
||||
eb->composite_fence ?
|
||||
eb->composite_fence :
|
||||
&eb->requests[j]->fence,
|
||||
flags | __EXEC_OBJECT_NO_RESERVE);
|
||||
flags | __EXEC_OBJECT_NO_RESERVE |
|
||||
__EXEC_OBJECT_NO_REQUEST_AWAIT);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -464,9 +464,7 @@ static int move_to_active(struct i915_vma *vma,
|
||||
int err;
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, flags);
|
||||
err = i915_vma_move_to_active(vma, rq, flags);
|
||||
i915_vma_unlock(vma);
|
||||
|
||||
return err;
|
||||
|
@ -239,9 +239,7 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
|
||||
}
|
||||
intel_ring_advance(rq, cs);
|
||||
|
||||
err = i915_request_await_object(rq, vma->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
|
||||
out_rq:
|
||||
i915_request_add(rq);
|
||||
|
@ -984,15 +984,11 @@ retry:
|
||||
goto err_batch;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(rq, batch->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
if (err)
|
||||
goto skip_request;
|
||||
|
||||
err = i915_request_await_object(rq, vma->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
if (err)
|
||||
goto skip_request;
|
||||
|
||||
@ -1553,9 +1549,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
|
||||
}
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
i915_vma_unlock(vma);
|
||||
if (err)
|
||||
goto skip_request;
|
||||
@ -1689,9 +1683,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
|
||||
}
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
i915_vma_unlock(vma);
|
||||
if (err)
|
||||
goto skip_request;
|
||||
|
@ -565,10 +565,8 @@ retry:
|
||||
goto err_unpin;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(rq, vma->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq,
|
||||
EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(vma, rq,
|
||||
EXEC_OBJECT_WRITE);
|
||||
|
||||
i915_request_add(rq);
|
||||
err_unpin:
|
||||
@ -1608,9 +1606,7 @@ retry:
|
||||
goto out_unpin;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(rq, vma->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
|
||||
err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
|
||||
i915_request_get(rq);
|
||||
|
@ -131,17 +131,13 @@ int igt_gpu_fill_dw(struct intel_context *ce,
|
||||
}
|
||||
|
||||
i915_vma_lock(batch);
|
||||
err = i915_request_await_object(rq, batch->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
i915_vma_unlock(batch);
|
||||
if (err)
|
||||
goto skip_request;
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
i915_vma_unlock(vma);
|
||||
if (err)
|
||||
goto skip_request;
|
||||
|
@ -215,9 +215,7 @@ int intel_renderstate_emit(struct intel_renderstate *so,
|
||||
if (!so->vma)
|
||||
return 0;
|
||||
|
||||
err = i915_request_await_object(rq, so->vma->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(so->vma, rq, 0);
|
||||
err = i915_vma_move_to_active(so->vma, rq, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -3189,9 +3189,7 @@ retry:
|
||||
goto err_vma;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(rq, vma->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
if (err == 0)
|
||||
err = wa_list_srm(rq, wal, vma);
|
||||
|
||||
|
@ -2764,9 +2764,7 @@ static int create_gang(struct intel_engine_cs *engine,
|
||||
i915_request_get(rq);
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, false);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
if (!err)
|
||||
err = rq->engine->emit_bb_start(rq,
|
||||
vma->node.start,
|
||||
@ -3180,14 +3178,10 @@ create_gpr_client(struct intel_engine_cs *engine,
|
||||
}
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, false);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
i915_vma_unlock(vma);
|
||||
|
||||
i915_vma_lock(batch);
|
||||
if (!err)
|
||||
err = i915_request_await_object(rq, batch->obj, false);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
if (!err)
|
||||
@ -3521,9 +3515,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
|
||||
|
||||
if (vma) {
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, false);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
if (!err)
|
||||
err = rq->engine->emit_bb_start(rq,
|
||||
vma->node.start,
|
||||
|
@ -106,10 +106,7 @@ static int move_to_active(struct i915_vma *vma,
|
||||
int err;
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj,
|
||||
flags & EXEC_OBJECT_WRITE);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, flags);
|
||||
err = i915_vma_move_to_active(vma, rq, flags);
|
||||
i915_vma_unlock(vma);
|
||||
|
||||
return err;
|
||||
@ -1520,15 +1517,9 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
|
||||
}
|
||||
|
||||
i915_vma_lock(arg.vma);
|
||||
err = i915_request_await_object(rq, arg.vma->obj,
|
||||
flags & EXEC_OBJECT_WRITE);
|
||||
if (err == 0) {
|
||||
err = i915_vma_move_to_active(arg.vma, rq, flags);
|
||||
if (err)
|
||||
pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
|
||||
} else {
|
||||
pr_err("[%s] Request await failed: %d!\n", engine->name, err);
|
||||
}
|
||||
err = i915_vma_move_to_active(arg.vma, rq, flags);
|
||||
if (err)
|
||||
pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
|
||||
|
||||
i915_vma_unlock(arg.vma);
|
||||
|
||||
|
@ -452,9 +452,7 @@ retry:
|
||||
*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
|
||||
*cs++ = 0;
|
||||
|
||||
err = i915_request_await_object(rq, scratch->obj, true);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
|
||||
|
||||
i915_request_get(rq);
|
||||
i915_request_add(rq);
|
||||
@ -602,9 +600,7 @@ __gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
|
||||
}
|
||||
|
||||
i915_vma_lock(scratch);
|
||||
err = i915_request_await_object(rq, scratch->obj, true);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
|
||||
i915_vma_unlock(scratch);
|
||||
|
||||
i915_request_get(rq);
|
||||
@ -1060,9 +1056,7 @@ static int move_to_active(struct i915_request *rq,
|
||||
int err;
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, flags);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(vma, rq, flags);
|
||||
err = i915_vma_move_to_active(vma, rq, flags);
|
||||
i915_vma_unlock(vma);
|
||||
|
||||
return err;
|
||||
|
@ -228,9 +228,7 @@ static int check_mocs_engine(struct live_mocs *arg,
|
||||
return PTR_ERR(rq);
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, true);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
i915_vma_unlock(vma);
|
||||
|
||||
/* Read the mocs tables back using SRM */
|
||||
|
@ -652,9 +652,7 @@ int live_rps_frequency_cs(void *arg)
|
||||
goto err_vma;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(rq, vma->obj, false);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
if (!err)
|
||||
err = rq->engine->emit_bb_start(rq,
|
||||
vma->node.start,
|
||||
@ -793,9 +791,7 @@ int live_rps_frequency_srm(void *arg)
|
||||
goto err_vma;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(rq, vma->obj, false);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
if (!err)
|
||||
err = rq->engine->emit_bb_start(rq,
|
||||
vma->node.start,
|
||||
|
@ -139,9 +139,7 @@ read_nonprivs(struct intel_context *ce)
|
||||
}
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
|
||||
i915_vma_unlock(vma);
|
||||
if (err)
|
||||
goto err_req;
|
||||
@ -632,16 +630,12 @@ retry:
|
||||
goto err_request;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(rq, batch->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
if (err)
|
||||
goto err_request;
|
||||
|
||||
err = i915_request_await_object(rq, scratch->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(scratch, rq,
|
||||
EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(scratch, rq,
|
||||
EXEC_OBJECT_WRITE);
|
||||
if (err)
|
||||
goto err_request;
|
||||
|
||||
@ -860,9 +854,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
|
||||
return PTR_ERR(rq);
|
||||
|
||||
i915_vma_lock(results);
|
||||
err = i915_request_await_object(rq, results->obj, true);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
|
||||
err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
|
||||
i915_vma_unlock(results);
|
||||
if (err)
|
||||
goto err_req;
|
||||
@ -944,9 +936,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
|
||||
}
|
||||
|
||||
i915_vma_lock(batch);
|
||||
err = i915_request_await_object(rq, batch->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
err = i915_vma_move_to_active(batch, rq, 0);
|
||||
i915_vma_unlock(batch);
|
||||
if (err)
|
||||
goto err_request;
|
||||
|
@ -570,9 +570,8 @@ retry:
|
||||
if (gmadr_bytes == 8)
|
||||
bb->bb_start_cmd_va[2] = 0;
|
||||
|
||||
ret = i915_vma_move_to_active(bb->vma,
|
||||
workload->req,
|
||||
0);
|
||||
ret = i915_vma_move_to_active(bb->vma, workload->req,
|
||||
__EXEC_OBJECT_NO_REQUEST_AWAIT);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
|
@ -2253,9 +2253,7 @@ retry:
|
||||
goto err_add_request;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(rq, vma->obj, 0);
|
||||
if (!err)
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
err = i915_vma_move_to_active(vma, rq, 0);
|
||||
if (err)
|
||||
goto err_add_request;
|
||||
|
||||
|
@ -1844,6 +1844,11 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
|
||||
|
||||
GEM_BUG_ON(!vma->pages);
|
||||
|
||||
if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
|
||||
err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
}
|
||||
err = __i915_vma_move_to_active(vma, rq);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
|
@ -55,6 +55,7 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
|
||||
|
||||
/* do not reserve memory to prevent deadlocks */
|
||||
#define __EXEC_OBJECT_NO_RESERVE BIT(31)
|
||||
#define __EXEC_OBJECT_NO_REQUEST_AWAIT BIT(30)
|
||||
|
||||
int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
|
||||
struct i915_request *rq,
|
||||
|
@ -1223,9 +1223,7 @@ static int live_all_engines(void *arg)
|
||||
goto out_request;
|
||||
}
|
||||
|
||||
err = i915_request_await_object(request[idx], batch->obj, 0);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(batch, request[idx], 0);
|
||||
err = i915_vma_move_to_active(batch, request[idx], 0);
|
||||
GEM_BUG_ON(err);
|
||||
|
||||
err = engine->emit_bb_start(request[idx],
|
||||
@ -1352,10 +1350,7 @@ static int live_sequential_engines(void *arg)
|
||||
}
|
||||
}
|
||||
|
||||
err = i915_request_await_object(request[idx],
|
||||
batch->obj, false);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(batch, request[idx], 0);
|
||||
err = i915_vma_move_to_active(batch, request[idx], 0);
|
||||
GEM_BUG_ON(err);
|
||||
|
||||
err = engine->emit_bb_start(request[idx],
|
||||
|
@ -126,10 +126,7 @@ static int move_to_active(struct i915_vma *vma,
|
||||
int err;
|
||||
|
||||
i915_vma_lock(vma);
|
||||
err = i915_request_await_object(rq, vma->obj,
|
||||
flags & EXEC_OBJECT_WRITE);
|
||||
if (err == 0)
|
||||
err = i915_vma_move_to_active(vma, rq, flags);
|
||||
err = i915_vma_move_to_active(vma, rq, flags);
|
||||
i915_vma_unlock(vma);
|
||||
|
||||
return err;
|
||||
|
Loading…
x
Reference in New Issue
Block a user