drm/i915/selftest: use igt_vma_move_to_active_unlocked if possible

The helper replaces the common i915_vma_lock() / i915_vma_move_to_active() / i915_vma_unlock() sequence with a single call.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221213121951.1515023-2-andrzej.hajda@intel.com
Author: Andrzej Hajda, 2022-12-13 13:19:51 +01:00 (committed by Andi Shyti)
parent f350c74fed
commit 4f16749f89
6 changed files with 12 additions and 36 deletions
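
For reference, here is a minimal sketch of what such a helper amounts to, reconstructed purely from the three-line sequence it replaces in the hunks below. It is not necessarily the exact in-tree definition (which may, for instance, call a different move_to_active variant internally):

/*
 * Illustrative sketch only: fold the lock / move_to_active / unlock
 * sequence removed in the hunks below into a single helper call.
 */
static inline int
igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
				unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}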

drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c

@@ -1551,9 +1551,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 		goto err_unpin;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (err)
 		goto skip_request;
@@ -1686,9 +1684,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 		goto err_unpin;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto skip_request;

drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c

@@ -130,15 +130,11 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 		goto err_batch;
 	}
 
-	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
+	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
 	if (err)
 		goto skip_request;
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto skip_request;

drivers/gpu/drm/i915/gt/selftest_execlists.c

@@ -2763,13 +2763,11 @@ static int create_gang(struct intel_engine_cs *engine,
 	rq->batch = i915_vma_get(vma);
 	i915_request_get(rq);
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (!err)
 		err = rq->engine->emit_bb_start(rq,
 						i915_vma_offset(vma),
 						PAGE_SIZE, 0);
-	i915_vma_unlock(vma);
 	i915_request_add(rq);
 	if (err)
 		goto err_rq;
@@ -3177,9 +3175,7 @@ create_gpr_client(struct intel_engine_cs *engine,
 		goto out_batch;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 
 	i915_vma_lock(batch);
 	if (!err)
@@ -3514,13 +3510,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
 	}
 
 	if (vma) {
-		i915_vma_lock(vma);
-		err = i915_vma_move_to_active(vma, rq, 0);
+		err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 		if (!err)
 			err = rq->engine->emit_bb_start(rq,
 							i915_vma_offset(vma),
 							PAGE_SIZE, 0);
-		i915_vma_unlock(vma);
 	}
 
 	i915_request_add(rq);

drivers/gpu/drm/i915/gt/selftest_lrc.c

@@ -599,9 +599,7 @@ __gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
 		*cs++ = 0;
 	}
 
-	i915_vma_lock(scratch);
-	err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(scratch);
+	err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
 
 	i915_request_get(rq);
 	i915_request_add(rq);

drivers/gpu/drm/i915/gt/selftest_mocs.c

@@ -228,9 +228,7 @@ static int check_mocs_engine(struct live_mocs *arg,
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 
 	/* Read the mocs tables back using SRM */
 	offset = i915_ggtt_offset(vma);

drivers/gpu/drm/i915/gt/selftest_workarounds.c

@@ -138,9 +138,7 @@ read_nonprivs(struct intel_context *ce)
 		goto err_pin;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto err_req;
@@ -853,9 +851,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	i915_vma_lock(results);
-	err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(results);
+	err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto err_req;
@@ -935,9 +931,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
 		goto err_request;
 	}
 
-	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
+	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
 	if (err)
 		goto err_request;