mm: always lock new vma before inserting into vma tree

While it's not strictly necessary to lock a newly created vma before
adding it into the vma tree (as long as no further changes are performed
to it), it seems like a good policy to lock it and prevent accidental
changes after it becomes visible to page faults. Lock the vma before
adding it into the vma tree.

[akpm@linux-foundation.org: fix reject fixing in vma_link(), per Jann]
Link: https://lkml.kernel.org/r/20230804152724.3090321-6-surenb@google.com
Suggested-by: Jann Horn <jannh@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Authored by Suren Baghdasaryan on 2023-08-04 08:27:23 -07:00; committed by Andrew Morton
parent 60081bf19b
commit ad9f006351

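For context, here is a minimal sketch of the insertion pattern this patch standardizes, assuming the caller already holds mmap_lock for writing. insert_new_vma() is a hypothetical helper modeled on vma_link(), not part of the patch; vma_start_write(), vma_iter_config(), vma_iter_prealloc() and vma_iter_store() are the same interfaces used in the hunks below.

/*
 * Hypothetical helper, for illustration only: insert a freshly
 * allocated, fully initialized vma into mm's vma tree.
 * Caller holds mmap_lock for writing.
 */
static int insert_new_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	/*
	 * Write-lock the vma before vma_iter_store() makes it
	 * reachable by lockless page fault handlers.
	 */
	vma_start_write(vma);
	vma_iter_store(&vmi, vma);

	return 0;
}

Because the vma is not yet visible to readers, the write lock taken by vma_start_write() is expected to be uncontended and cheap; it marks the vma as write-locked so that lockless faults back off and retry under mmap_lock.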
mm/mmap.c

@@ -401,6 +401,8 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma_iter_prealloc(&vmi, vma))
 		return -ENOMEM;
 
+	vma_start_write(vma);
+
 	vma_iter_store(&vmi, vma);
 	if (vma->vm_file) {
@@ -463,7 +465,8 @@ static inline void vma_prepare(struct vma_prepare *vp)
 	vma_start_write(vp->vma);
 	if (vp->adj_next)
 		vma_start_write(vp->adj_next);
-	/* vp->insert is always a newly created VMA, no need for locking */
+	if (vp->insert)
+		vma_start_write(vp->insert);
 	if (vp->remove)
 		vma_start_write(vp->remove);
 	if (vp->remove2)
@@ -3093,6 +3096,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	vma->vm_pgoff = addr >> PAGE_SHIFT;
 	vm_flags_init(vma, flags);
 	vma->vm_page_prot = vm_get_page_prot(flags);
+	vma_start_write(vma);
 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
 		goto mas_store_fail;
@@ -3341,7 +3345,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			get_file(new_vma->vm_file);
 		if (new_vma->vm_ops && new_vma->vm_ops->open)
 			new_vma->vm_ops->open(new_vma);
-		vma_start_write(new_vma);
 		if (vma_link(mm, new_vma))
 			goto out_vma_link;
 		*need_rmap_locks = false;