drm/i915: Use kmap_local_page() in gem/i915_gem_execbuffer.c
The use of kmap_atomic() is being deprecated in favor of kmap_local_page()[1], and this patch converts the calls from kmap_atomic() to kmap_local_page(). The main difference between atomic and local mappings is that local mappings don't disable page faults or preemption (preemption is disabled in the !PREEMPT_RT case; otherwise it only disables migration). With kmap_local_page(), we can avoid the often unwanted side effects of unnecessary page faults and preemption disables. In i915_gem_execbuffer.c, eb->reloc_cache.vaddr is mapped by kmap_atomic() in eb_relocate_entry(), and is unmapped by kunmap_atomic() in reloc_cache_reset(). This mapping/unmapping occurs in two places: one is in eb_relocate_vma(), and another is in eb_relocate_vma_slow(). Neither eb_relocate_vma() nor eb_relocate_vma_slow() needs to disable page faults and preemption during the above mapping/unmapping. So they can simply use kmap_local_page() / kunmap_local(), which can instead do the mapping / unmapping regardless of the context. Convert the calls of kmap_atomic() / kunmap_atomic() to kmap_local_page() / kunmap_local(). [1]: https://lore.kernel.org/all/20220813220034.806698-1-ira.weiny@intel.com Suggested-by: Ira Weiny <ira.weiny@intel.com> Suggested-by: Fabio M. De Francesco <fmdefrancesco@gmail.com> Signed-off-by: Zhao Liu <zhao1.liu@intel.com> Reviewed-by: Ira Weiny <ira.weiny@intel.com> Reviewed-by: Fabio M. De Francesco <fmdefrancesco@gmail.com> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20231203132947.2328805-10-zhao1.liu@linux.intel.com
This commit is contained in:
parent
e4865c60dd
commit
31accc37ea
@ -1158,7 +1158,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
|
||||
|
||||
vaddr = unmask_page(cache->vaddr);
|
||||
if (cache->vaddr & KMAP)
|
||||
kunmap_atomic(vaddr);
|
||||
kunmap_local(vaddr);
|
||||
else
|
||||
io_mapping_unmap_atomic((void __iomem *)vaddr);
|
||||
}
|
||||
@ -1174,7 +1174,7 @@ static void reloc_cache_remap(struct reloc_cache *cache,
|
||||
if (cache->vaddr & KMAP) {
|
||||
struct page *page = i915_gem_object_get_page(obj, cache->page);
|
||||
|
||||
vaddr = kmap_atomic(page);
|
||||
vaddr = kmap_local_page(page);
|
||||
cache->vaddr = unmask_flags(cache->vaddr) |
|
||||
(unsigned long)vaddr;
|
||||
} else {
|
||||
@ -1204,7 +1204,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
|
||||
if (cache->vaddr & CLFLUSH_AFTER)
|
||||
mb();
|
||||
|
||||
kunmap_atomic(vaddr);
|
||||
kunmap_local(vaddr);
|
||||
i915_gem_object_finish_access(obj);
|
||||
} else {
|
||||
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
|
||||
@ -1236,7 +1236,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
|
||||
struct page *page;
|
||||
|
||||
if (cache->vaddr) {
|
||||
kunmap_atomic(unmask_page(cache->vaddr));
|
||||
kunmap_local(unmask_page(cache->vaddr));
|
||||
} else {
|
||||
unsigned int flushes;
|
||||
int err;
|
||||
@ -1258,7 +1258,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
|
||||
if (!obj->mm.dirty)
|
||||
set_page_dirty(page);
|
||||
|
||||
vaddr = kmap_atomic(page);
|
||||
vaddr = kmap_local_page(page);
|
||||
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
|
||||
cache->page = pageno;
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user