drm/i915: Only enqueue already completed requests

If we are asked to submit a completed request, just move it onto the
active-list without modifying its payload. If we try to emit the
modified payload of a completed request, we risk racing with the
ring->head update during retirement, which may advance the head past
our breadcrumb, and so we generate a warning for the emission being
behind the RING_HEAD.

v2: Commentary for the sneaky, shared responsibility between functions.
v3: Spelling mistakes and bonus assertion

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190923110056.15176-3-chris@chris-wilson.co.uk
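
The fix depends on __i915_request_submit() now reporting whether the
request was actually submitted; that change lands in i915_request.c (one
of the three files touched) and its hunk is not shown in this excerpt.
Below is a minimal, self-contained sketch of the intended contract, not
the driver code: request_model and model_request_submit() are invented
stand-ins, and only i915_request_completed()/__i915_request_submit()
name real driver functions.

#include <stdbool.h>

/* Minimal model of the submit contract (illustrative only, not the
 * driver code): a request that is already complete is only moved onto
 * the active list; its ring payload is never touched again. */
struct request_model {
	bool completed;		/* stands in for i915_request_completed() */
	bool on_active_list;
	bool payload_emitted;
};

/* Mirrors the bool return the hunks below rely on: true only if the
 * payload was emitted, false if the request had already completed. */
static bool model_request_submit(struct request_model *rq)
{
	rq->on_active_list = true;	/* always tracked for retirement */

	if (rq->completed)
		return false;		/* never re-emit: avoids racing the
					 * ring->head update in retirement */

	rq->payload_emitted = true;
	return true;
}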
commit c0bb487dc1 (parent 3231f8c011)
Chris Wilson, 2019-09-23 12:00:55 +01:00
3 files changed, 74 insertions(+), 38 deletions(-)

drivers/gpu/drm/i915/gt/intel_lrc.c

@@ -799,6 +799,17 @@ static bool can_merge_rq(const struct i915_request *prev,
 	GEM_BUG_ON(prev == next);
 	GEM_BUG_ON(!assert_priority_queue(prev, next));
 
+	/*
+	 * We do not submit known completed requests. Therefore if the next
+	 * request is already completed, we can pretend to merge it in
+	 * with the previous context (and we will skip updating the ELSP
+	 * and tracking). Thus hopefully keeping the ELSP full with active
+	 * contexts, despite the best efforts of preempt-to-busy to confuse
+	 * us.
+	 */
+	if (i915_request_completed(next))
+		return true;
+
 	if (!can_merge_ctx(prev->hw_context, next->hw_context))
 		return false;
@@ -1181,21 +1192,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				continue;
 			}
 
-			if (i915_request_completed(rq)) {
-				ve->request = NULL;
-				ve->base.execlists.queue_priority_hint = INT_MIN;
-				rb_erase_cached(rb, &execlists->virtual);
-				RB_CLEAR_NODE(rb);
-
-				rq->engine = engine;
-				__i915_request_submit(rq);
-
-				spin_unlock(&ve->base.active.lock);
-
-				rb = rb_first_cached(&execlists->virtual);
-				continue;
-			}
-
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
 				return; /* leave this for another */
@@ -1249,11 +1245,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(ve->siblings[0] != engine);
 			}
 
-			__i915_request_submit(rq);
-			if (!i915_request_completed(rq)) {
+			if (__i915_request_submit(rq)) {
 				submit = true;
 				last = rq;
 			}
+
+			/*
+			 * Hmm, we have a bunch of virtual engine requests,
+			 * but the first one was already completed (thanks
+			 * preempt-to-busy!). Keep looking at the veng queue
+			 * until we have no more relevant requests (i.e.
+			 * the normal submit queue has higher priority).
+			 */
+			if (!submit) {
+				spin_unlock(&ve->base.active.lock);
+				rb = rb_first_cached(&execlists->virtual);
+				continue;
+			}
 		}
 
 		spin_unlock(&ve->base.active.lock);
@@ -1266,8 +1274,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			if (i915_request_completed(rq))
-				goto skip;
+			bool merge = true;
 
 			/*
 			 * Can we combine this request with the current port?
@@ -1308,14 +1315,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				    ctx_single_port_submission(rq->hw_context))
 					goto done;
 
-				*port = execlists_schedule_in(last, port - execlists->pending);
-				port++;
+				merge = false;
 			}
 
-			last = rq;
-			submit = true;
-skip:
-			__i915_request_submit(rq);
+			if (__i915_request_submit(rq)) {
+				if (!merge) {
+					*port = execlists_schedule_in(last, port - execlists->pending);
+					port++;
+					last = NULL;
+				}
+
+				GEM_BUG_ON(last &&
+					   !can_merge_ctx(last->hw_context,
+							  rq->hw_context));
+
+				submit = true;
+				last = rq;
+			}
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
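
To close, a tiny usage example of the model sketched above (paired with
that sketch it compiles as one file; again illustrative, not driver
code): a live request is emitted and tracked, while a request unwound
and completed under preempt-to-busy is tracked but its payload stays
untouched, so nothing can fall behind RING_HEAD.

#include <assert.h>

int main(void)
{
	struct request_model fresh = {0};
	struct request_model stale = { .completed = true };

	assert(model_request_submit(&fresh));	/* emitted and tracked */
	assert(fresh.payload_emitted && fresh.on_active_list);

	assert(!model_request_submit(&stale));	/* tracked, not emitted */
	assert(stale.on_active_list && !stale.payload_emitted);

	return 0;
}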