drm/i915/gt: Avoid resetting ring->head outside of its timeline mutex

We manipulate ring->head from within i915_request_retire, under the
protection of the timeline->mutex. We therefore cannot rely on a stable
ring->head outside of the timeline->mutex, in particular while setting
up the context image for resume and reset.

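To illustrate the race (a minimal, self-contained sketch with simplified
stand-ins, not the real i915 structures or locking API):

typedef unsigned int u32;

struct sketch_ring {
	u32 head;	/* advanced by retirement, under timeline->mutex */
	u32 tail;	/* advanced on submission */
};

/* Retirement legitimately moves head forward as requests complete... */
static void sketch_retire(struct sketch_ring *ring, u32 new_head)
{
	/* caller holds timeline->mutex */
	ring->head = new_head;
}

/*
 * ...so a resume/reset path that samples ring->head without taking
 * timeline->mutex can observe a transient value and bake it into the
 * context image. Hence: compute the head locally and pass it down.
 */
static u32 sketch_racy_head(const struct sketch_ring *ring)
{
	return ring->head;	/* unstable outside timeline->mutex */
}
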
Closes: https://gitlab.freedesktop.org/drm/intel/issues/1126
Fixes: 0881954965 ("drm/i915: Introduce intel_context.pin_mutex for pin management")
Fixes: e5dadff4b0 ("drm/i915: Protect request retirement with timeline->mutex")
References: f3c0efc9fe ("drm/i915/execlists: Leave resetting ring to intel_ring")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200211120131.958949-1-chris@chris-wilson.co.uk
commit 42827350f7 (parent 3d9c13a6a7)
Chris Wilson, 2020-02-11 12:01:31 +00:00
3 files changed, 21 insertions(+), 23 deletions(-)

@@ -235,7 +235,8 @@ static void execlists_init_reg_state(u32 *reg_state,
 			     bool close);
 static void
 __execlists_update_reg_state(const struct intel_context *ce,
-			     const struct intel_engine_cs *engine);
+			     const struct intel_engine_cs *engine,
+			     u32 head);
 
 static void mark_eio(struct i915_request *rq)
 {
@@ -1184,12 +1185,11 @@ static void reset_active(struct i915_request *rq,
 		head = rq->tail;
 	else
 		head = active_request(ce->timeline, rq)->head;
-	ce->ring->head = intel_ring_wrap(ce->ring, head);
-	intel_ring_update_space(ce->ring);
+	head = intel_ring_wrap(ce->ring, head);
 
 	/* Scrub the context image to prevent replaying the previous batch */
 	restore_default_state(ce, engine);
-	__execlists_update_reg_state(ce, engine);
+	__execlists_update_reg_state(ce, engine, head);
 
 	/* We've switched away, so this should be a no-op, but intent matters */
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
@@ -2878,16 +2878,17 @@ static void execlists_context_unpin(struct intel_context *ce)
 
 static void
 __execlists_update_reg_state(const struct intel_context *ce,
-			     const struct intel_engine_cs *engine)
+			     const struct intel_engine_cs *engine,
+			     u32 head)
 {
 	struct intel_ring *ring = ce->ring;
 	u32 *regs = ce->lrc_reg_state;
 
-	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
 	regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
-	regs[CTX_RING_HEAD] = ring->head;
+	regs[CTX_RING_HEAD] = head;
 	regs[CTX_RING_TAIL] = ring->tail;
 
 	/* RPCS */
@@ -2916,7 +2917,7 @@ __execlists_context_pin(struct intel_context *ce,
 
 	ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-	__execlists_update_reg_state(ce, engine);
+	__execlists_update_reg_state(ce, engine, ce->ring->tail);
 
 	return 0;
 }
@@ -2941,7 +2942,7 @@ static void execlists_context_reset(struct intel_context *ce)
 	/* Scrub away the garbage */
 	execlists_init_reg_state(ce->lrc_reg_state,
 				 ce, ce->engine, ce->ring, true);
-	__execlists_update_reg_state(ce, ce->engine);
+	__execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
 
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 }
@@ -3538,6 +3539,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct intel_context *ce;
 	struct i915_request *rq;
+	u32 head;
 
 	mb(); /* paranoia: read the CSB pointers from after the reset */
 	clflush(execlists->csb_write);
@@ -3565,15 +3567,15 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 
 	if (i915_request_completed(rq)) {
 		/* Idle context; tidy up the ring so we can restart afresh */
-		ce->ring->head = intel_ring_wrap(ce->ring, rq->tail);
+		head = intel_ring_wrap(ce->ring, rq->tail);
 		goto out_replay;
 	}
 
 	/* Context has requests still in-flight; it should not be idle! */
 	GEM_BUG_ON(i915_active_is_idle(&ce->active));
 	rq = active_request(ce->timeline, rq);
-	ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
-	GEM_BUG_ON(ce->ring->head == ce->ring->tail);
+	head = intel_ring_wrap(ce->ring, rq->head);
+	GEM_BUG_ON(head == ce->ring->tail);
 
 	/*
 	 * If this request hasn't started yet, e.g. it is waiting on a
@@ -3618,10 +3620,9 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 
 out_replay:
 	ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
-		     ce->ring->head, ce->ring->tail);
-	intel_ring_update_space(ce->ring);
+		     head, ce->ring->tail);
 	__execlists_reset_reg_state(ce, engine);
-	__execlists_update_reg_state(ce, engine);
+	__execlists_update_reg_state(ce, engine, head);
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
 
 unwind:
@@ -5265,10 +5266,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
 		restore_default_state(ce, engine);
 
 	/* Rerun the request; its payload has been neutered (if guilty). */
-	ce->ring->head = head;
-	intel_ring_update_space(ce->ring);
-
-	__execlists_update_reg_state(ce, engine);
+	__execlists_update_reg_state(ce, engine, head);
 }
 
 bool

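The shape of the fix, condensed into a standalone sketch (hypothetical
names; the wrap helper is modelled on intel_ring_wrap()'s power-of-two
masking): the reset paths now derive the replay head locally and hand it
to the register updater, never writing ce->ring->head themselves.

typedef unsigned int u32;

struct sketch_ring { u32 tail, size; };	/* size is a power of two */

/* modelled on intel_ring_wrap(): reduce a position modulo the ring size */
static u32 sketch_ring_wrap(const struct sketch_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

/* the updater now receives head explicitly instead of reading ring->head */
static void sketch_update_reg_state(u32 *regs, const struct sketch_ring *ring,
				    u32 head)
{
	regs[0] = head;		/* stands in for regs[CTX_RING_HEAD] */
	regs[1] = ring->tail;	/* stands in for regs[CTX_RING_TAIL] */
}
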
@@ -39,9 +39,9 @@ struct intel_ring {
 	 */
 	atomic_t pin_count;
 
-	u32 head;
-	u32 tail;
-	u32 emit;
+	u32 head; /* updated during retire, loosely tracks RING_HEAD */
+	u32 tail; /* updated on submission, used for RING_TAIL */
+	u32 emit; /* updated during request construction */
 
 	u32 space;
 	u32 size;

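A toy model of how the three cursors documented above move (hypothetical
helpers, not the driver's API): emit chases request construction, tail
is published on submission, and head follows retirement, which is why
head is only stable under the timeline->mutex.

typedef unsigned int u32;

struct toy_ring { u32 head, tail, emit, size; };	/* size: power of two */

static void toy_emit(struct toy_ring *r, u32 bytes)
{
	r->emit = (r->emit + bytes) & (r->size - 1);	/* request construction */
}

static void toy_submit(struct toy_ring *r)
{
	r->tail = r->emit;	/* becomes RING_TAIL on submission */
}

static void toy_retire(struct toy_ring *r, u32 req_tail)
{
	/* caller holds timeline->mutex */
	r->head = req_tail;	/* ring space up to here may be reused */
}
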
@@ -201,7 +201,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
 	}
 	GEM_BUG_ON(!ce[1]->ring->size);
 	intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
-	__execlists_update_reg_state(ce[1], engine);
+	__execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
 
 	rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
 	if (IS_ERR(rq[0])) {