drm/i915/gem: Make i915_gem_shrinker multi-gt aware
Where applicable, use for_each_gt instead of to_gt in the
i915_gem_shrinker functions so that they apply to more than just the
primary GT. Specifically, this ensures that i915_gem_shrink_all
retires all requests across all GTs, and that i915_gem_shrinker_vmap
unmaps VMAs from all GTs.

v2: Pass the correct GT to intel_gt_retire_requests() (Andrzej)
v3: Remove unnecessary braces (Andi)
v4: Undo v3 to fix a build failure

Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230926093028.23614-1-nirmoy.das@intel.com
commit 0951dce656
parent 37d62359b1
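For context, the heart of the change is the iteration pattern: to_gt(i915) yields only the device's primary GT, while for_each_gt() walks every populated GT on a multi-tile device, skipping empty slots. Below is a minimal userspace model of that pattern, not the kernel code itself: struct intel_gt, struct drm_i915_private, MAX_GT, and retire_requests() are illustrative stand-ins, and the two macros are simplified sketches of the driver's for_each_gt() (gt/intel_gt.h) and drm's for_each_if() (drm_util.h).

	#include <stdio.h>

	#define MAX_GT 4			/* stand-in for I915_MAX_GT */

	struct intel_gt { int id; };

	struct drm_i915_private {
		struct intel_gt *gt[MAX_GT];	/* NULL where no tile exists */
	};

	/* Primary GT only: what the old to_gt(i915)-based code acted on */
	#define to_gt(i915) ((i915)->gt[0])

	/* Mirrors drm's for_each_if(): skip the body when the condition is false */
	#define for_each_if(condition) if (!(condition)) {} else

	/* Simplified for_each_gt(): visit every populated GT, skipping NULL slots */
	#define for_each_gt(gt__, i915__, id__) \
		for ((id__) = 0; (id__) < MAX_GT; (id__)++) \
			for_each_if(((gt__) = (i915__)->gt[(id__)]))

	/* Stand-in for intel_gt_retire_requests() */
	static void retire_requests(struct intel_gt *gt)
	{
		printf("retiring requests on GT%d\n", gt->id);
	}

	int main(void)
	{
		struct intel_gt gt0 = { .id = 0 }, gt1 = { .id = 1 };
		struct drm_i915_private i915 = { .gt = { &gt0, &gt1 } };
		struct intel_gt *gt;
		int i;

		/* Old behaviour: only the primary GT is touched */
		retire_requests(to_gt(&i915));

		/* New behaviour: every populated GT is visited */
		for_each_gt(gt, &i915, i)
			retire_requests(gt);

		return 0;
	}

The same skip-the-NULL-slots iteration is what lets the unmap loop in i915_gem_shrinker_vmap below take and release each GT's GGTT vm.mutex in turn, instead of operating on the primary GT's GGTT alone.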
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 
 #include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
 
 #include "i915_trace.h"
@@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	intel_wakeref_t wakeref = 0;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	int err = 0;
+	int err = 0, i = 0;
+	struct intel_gt *gt;
 
 	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
 	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 * what we can do is give them a kick so that we do not keep idle
 	 * contexts around longer than is necessary.
 	 */
-	if (shrink & I915_SHRINK_ACTIVE)
-		/* Retire requests to unpin all idle contexts */
-		intel_gt_retire_requests(to_gt(i915));
+	if (shrink & I915_SHRINK_ACTIVE) {
+		for_each_gt(gt, i915, i)
+			/* Retire requests to unpin all idle contexts */
+			intel_gt_retire_requests(gt);
+	}
 
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -389,6 +393,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
 	intel_wakeref_t wakeref;
+	struct intel_gt *gt;
+	int i;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
@@ -397,24 +403,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 					       I915_SHRINK_VMAPS);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
-	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
-	list_for_each_entry_safe(vma, next,
-				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
-		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
-		struct drm_i915_gem_object *obj = vma->obj;
+	for_each_gt(gt, i915, i) {
+		mutex_lock(&gt->ggtt->vm.mutex);
+		list_for_each_entry_safe(vma, next,
+					 &gt->ggtt->vm.bound_list, vm_link) {
+			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
+			struct drm_i915_gem_object *obj = vma->obj;
 
-		if (!vma->iomap || i915_vma_is_active(vma))
-			continue;
+			if (!vma->iomap || i915_vma_is_active(vma))
+				continue;
 
-		if (!i915_gem_object_trylock(obj, NULL))
-			continue;
+			if (!i915_gem_object_trylock(obj, NULL))
+				continue;
 
-		if (__i915_vma_unbind(vma) == 0)
-			freed_pages += count;
+			if (__i915_vma_unbind(vma) == 0)
+				freed_pages += count;
 
-		i915_gem_object_unlock(obj);
+			i915_gem_object_unlock(obj);
+		}
+		mutex_unlock(&gt->ggtt->vm.mutex);
 	}
-	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;