drm/i915: Reduce i915_request_alloc retirement to local context
In the continual quest to reduce the amount of global work required when
submitting requests, replace i915_retire_requests() after allocation
failure with retiring just our ring.
v2: Don't forget the list iteration included an early break, so we would
never throttle on the last request in the ring/timeline.
v3: Use the common ring_retire_requests()
References: 11abf0c5a0 ("drm/i915: Limit the backpressure for i915_request allocation")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190109215932.26454-1-chris@chris-wilson.co.uk
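
What the patch changes is where the retirement work happens, not how. On
allocation failure the slow path now walks only its own ring's request list,
retiring completed requests from the oldest onwards and stopping at the
first one still in flight. A minimal standalone sketch of that
FIFO-retirement pattern (plain C, with a singly linked list and a flag
standing in for the kernel's list_head and i915_request_completed(); an
illustration of the idea, not the driver code itself):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	bool completed;
	struct request *next;     /* next-younger request on the same ring */
};

struct ring {
	struct request *requests; /* oldest request at the head */
};

/*
 * Requests on a ring complete in order, so retirement walks from the
 * oldest request and stops at the first incomplete one -- the early
 * break the v2 note above refers to.
 */
static void ring_retire_requests(struct ring *ring)
{
	struct request *rq;

	while ((rq = ring->requests) && rq->completed) {
		ring->requests = rq->next;
		free(rq);         /* stands in for i915_request_retire() */
	}
}

int main(void)
{
	struct ring ring = { NULL };
	struct request *rq, **tail = &ring.requests;

	for (int i = 0; i < 3; i++) {     /* queue three requests in order */
		rq = calloc(1, sizeof(*rq));
		*tail = rq;
		tail = &rq->next;
	}
	ring.requests->completed = true;  /* only the oldest is done */

	ring_retire_requests(&ring);
	/* One request retired; the two still in flight survive the walk. */
	printf("head pending: %d\n", ring.requests && !ring.requests->completed);

	while ((rq = ring.requests)) {    /* drop the rest on exit */
		ring.requests = rq->next;
		free(rq);
	}
	return 0;
}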
@@ -477,6 +477,38 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
+static void ring_retire_requests(struct intel_ring *ring)
+{
+	struct i915_request *rq, *rn;
+
+	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+		if (!i915_request_completed(rq))
+			break;
+
+		i915_request_retire(rq);
+	}
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+	struct intel_ring *ring = ce->ring;
+	struct i915_request *rq;
+
+	if (list_empty(&ring->request_list))
+		goto out;
+
+	/* Ratelimit ourselves to prevent oom from malicious clients */
+	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+	cond_synchronize_rcu(rq->rcustate);
+
+	/* Retire our old requests in the hope that we free some */
+	ring_retire_requests(ring);
+
+out:
+	return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+}
+
 /**
  * i915_request_alloc - allocate a request structure
  *
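
Before retiring anything, i915_request_alloc_slow() above throttles with
cond_synchronize_rcu(rq->rcustate) on the youngest request on the ring
(list_last_entry), rather than on whatever the retire loop happens to reach,
since that loop breaks at the first incomplete request (the v2 fix). The
cookie works in two halves; a kernel-C sketch, assuming rcustate is sampled
with get_state_synchronize_rcu() at request-allocation time (the my_* names
are placeholders for illustration):

#include <linux/rcupdate.h>

struct my_request {
	unsigned long rcustate;  /* RCU grace-period cookie */
};

/* At allocation: snapshot the current RCU grace-period state. */
static void my_request_init(struct my_request *rq)
{
	rq->rcustate = get_state_synchronize_rcu();
}

/*
 * On allocation failure: sleep until at least one full grace period has
 * elapsed since @last was allocated, so that RCU-deferred frees of old
 * requests can return memory before we retry the allocation.
 */
static void my_throttle_on(struct my_request *last)
{
	cond_synchronize_rcu(last->rcustate);
}

If a grace period has already elapsed since the snapshot,
cond_synchronize_rcu() returns immediately, so well-behaved clients pay
nothing; only a client hammering the allocator gets put to sleep.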
@@ -559,15 +591,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	rq = kmem_cache_alloc(i915->requests,
 			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		i915_retire_requests(i915);
-
-		/* Ratelimit ourselves to prevent oom from malicious clients */
-		rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
-					 &i915->drm.struct_mutex);
-		if (rq)
-			cond_synchronize_rcu(rq->rcustate);
-
-		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+		rq = i915_request_alloc_slow(ce);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
@@ -1218,19 +1242,6 @@ complete:
 	return timeout;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
-{
-	struct i915_request *request, *next;
-
-	list_for_each_entry_safe(request, next,
-				 &ring->request_list, ring_link) {
-		if (!i915_request_completed(request))
-			break;
-
-		i915_request_retire(request);
-	}
-}
-
 void i915_retire_requests(struct drm_i915_private *i915)
 {
 	struct intel_ring *ring, *tmp;
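
Note that this last hunk only moves code: ring_retire_requests() now lives
earlier in the file (first hunk) so that the allocation slow path can share
it, per the v3 note. i915_retire_requests() itself is unchanged and remains
available to callers that really do want to retire across every ring; only
the allocation-failure path has been narrowed to its own ring, which is the
"local context" of the subject line.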