drm/i915/gt: Protect signaler walk with RCU
While we know that the waiters cannot disappear as we walk our list
(only that they might be added), the same cannot be said for our
signalers, as they may be completed by the HW and retired as we process
this request. Ergo we need to use RCU to protect the list iteration,
and remember to mark the deletions with list_del_rcu().

v2: Mark the deps as safe-for-rcu

Fixes: 793c22617367 ("drm/i915/gt: Protect execlists_hold/unhold from new waiters")
Fixes: 32ff621fd744 ("drm/i915/gt: Allow temporary suspension of inflight requests")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200220075025.1539375-1-chris@chris-wilson.co.uk
parent df6b1f3da8
commit 66940061a5
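The pattern being applied, sketched here as self-contained kernel-style C for readers without the i915 sources to hand (illustrative only: the struct and function names are invented, and the real patch keeps dependency objects type-stable with SLAB_TYPESAFE_BY_RCU instead of deferring each free with kfree_rcu()):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct dep {
	struct list_head link;
	struct rcu_head rcu;
	bool on_hold;
};

static LIST_HEAD(signalers);
static DEFINE_SPINLOCK(lock);

/* Writer: unlink under the lock, free only after a grace period. */
static void retire_dep(struct dep *d)
{
	spin_lock(&lock);
	list_del_rcu(&d->link);		/* readers may still be walking */
	spin_unlock(&lock);
	kfree_rcu(d, rcu);
}

/* Reader: lockless walk, cf. hold_request() below. */
static bool any_on_hold(void)
{
	struct dep *d;
	bool result = false;

	rcu_read_lock();
	list_for_each_entry_rcu(d, &signalers, link) {
		result = READ_ONCE(d->on_hold);
		if (result)
			break;	/* break, not return: the unlock must run */
	}
	rcu_read_unlock();

	return result;
}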
drivers/gpu/drm/i915/gt/intel_lrc.c

@@ -1668,9 +1668,9 @@ last_active(const struct intel_engine_execlists *execlists)
 				     wait_link)
 
 #define for_each_signaler(p__, rq__) \
-	list_for_each_entry_lockless(p__, \
-				     &(rq__)->sched.signalers_list, \
-				     signal_link)
+	list_for_each_entry_rcu(p__, \
+				&(rq__)->sched.signalers_list, \
+				signal_link)
 
 static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
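The macro swap above is the reader half of the fix: list_for_each_entry_rcu() is the primitive documented for traversing a list whose entries may concurrently be unlinked with list_del_rcu(), provided the walk runs inside an RCU read-side critical section, which hold_request() now opens below.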
@@ -2533,11 +2533,13 @@ unlock:
 static bool hold_request(const struct i915_request *rq)
 {
 	struct i915_dependency *p;
+	bool result = false;
 
 	/*
 	 * If one of our ancestors is on hold, we must also be on hold,
 	 * otherwise we will bypass it and execute before it.
 	 */
+	rcu_read_lock();
 	for_each_signaler(p, rq) {
 		const struct i915_request *s =
 			container_of(p->signaler, typeof(*s), sched);
@@ -2545,11 +2547,13 @@ static bool hold_request(const struct i915_request *rq)
 		if (s->engine != rq->engine)
 			continue;
 
-		if (i915_request_on_hold(s))
-			return true;
+		result = i915_request_on_hold(s);
+		if (result)
+			break;
 	}
+	rcu_read_unlock();
 
-	return false;
+	return result;
 }
 
 static void __execlists_unhold(struct i915_request *rq)
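Why latch-and-break rather than simply wrapping the old loop in rcu_read_lock()/rcu_read_unlock()? A minimal sketch of the hazard (not the i915 code; on_hold() stands in for the container_of()/i915_request_on_hold() sequence in the hunk):

	rcu_read_lock();
	for_each_signaler(p, rq)
		if (on_hold(p))
			return true;	/* BUG: rcu_read_unlock() never runs */
	rcu_read_unlock();

Latching the verdict in result and exiting with break funnels every path through the single rcu_read_unlock(), which is exactly what the hunk above does.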
drivers/gpu/drm/i915/i915_scheduler.c

@@ -486,7 +486,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
-		list_del(&dep->wait_link);
+		list_del_rcu(&dep->wait_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
@@ -497,7 +497,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 		GEM_BUG_ON(dep->signaler != node);
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
-		list_del(&dep->signal_link);
+		list_del_rcu(&dep->signal_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
@@ -526,7 +526,8 @@ static struct i915_global_scheduler global = { {
 int __init i915_global_scheduler_init(void)
 {
 	global.slab_dependencies = KMEM_CACHE(i915_dependency,
-					      SLAB_HWCACHE_ALIGN);
+					      SLAB_HWCACHE_ALIGN |
+					      SLAB_TYPESAFE_BY_RCU);
 
 	if (!global.slab_dependencies)
 		return -ENOMEM;
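This is v2's "mark the deps as safe-for-rcu". Note what SLAB_TYPESAFE_BY_RCU does and does not promise: a freed i915_dependency may be recycled for a new dependency immediately, but the slab's backing pages are only returned to the system after an RCU grace period. A hold_request() walk that races with retirement therefore always dereferences memory still laid out as an i915_dependency, never freed pages. That is sufficient here, since the walk only reads the objects and never takes a reference on them.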