mm: optimise vmf_anon_prepare() for VMAs without an anon_vma

If the mmap_lock can be taken for read, we can call __anon_vma_prepare()
while holding it, saving ourselves a trip back through the fault handler.

Link: https://lkml.kernel.org/r/20240426144506.1290619-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jann Horn <jannh@google.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
  parent: 73b4a0cd82
  commit: 737019cf6a
Changed files: mm/memory.c (13 lines changed)
@@ -3232,16 +3232,21 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
 
 vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	vm_fault_t ret = 0;
 
 	if (likely(vma->anon_vma))
 		return 0;
 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-		vma_end_read(vma);
-		return VM_FAULT_RETRY;
+		if (!mmap_read_trylock(vma->vm_mm)) {
+			vma_end_read(vma);
+			return VM_FAULT_RETRY;
+		}
 	}
 	if (__anon_vma_prepare(vma))
-		return VM_FAULT_OOM;
-	return 0;
+		ret = VM_FAULT_OOM;
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+		mmap_read_unlock(vma->vm_mm);
+	return ret;
 }
 
 /*
Loading…
x
Reference in New Issue
Block a user