diff --git a/mm/mmap.c b/mm/mmap.c
index bb128a42557e..d72716f7a0b4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2802,6 +2802,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+	struct mm_struct *mm = start->vm_mm;
+	struct vm_area_struct *tmp = start;
+
+	while (tmp && tmp->vm_start < limit) {
+		if (tmp->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(tmp);
+			munlock_vma_pages_all(tmp);
+		}
+
+		tmp = tmp->vm_next;
+	}
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work. This now handles partial unmappings.
@@ -2885,17 +2901,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/*
 	 * unlock any mlock()ed ranges before detaching vmas
 	 */
-	if (mm->locked_vm) {
-		struct vm_area_struct *tmp = vma;
-		while (tmp && tmp->vm_start < end) {
-			if (tmp->vm_flags & VM_LOCKED) {
-				mm->locked_vm -= vma_pages(tmp);
-				munlock_vma_pages_all(tmp);
-			}
-
-			tmp = tmp->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(vma, end);
 
 	/* Detach vmas from rbtree */
 	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3180,14 +3187,8 @@ void exit_mmap(struct mm_struct *mm)
 		mmap_write_unlock(mm);
 	}
 
-	if (mm->locked_vm) {
-		vma = mm->mmap;
-		while (vma) {
-			if (vma->vm_flags & VM_LOCKED)
-				munlock_vma_pages_all(vma);
-			vma = vma->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(mm->mmap, ULONG_MAX);
 
 	arch_exit_mmap(mm);
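
For reference, below is a minimal, self-contained sketch (not kernel code) of the list walk that the new unlock_range() helper performs: it walks the address-ordered VMA list from a starting VMA until it reaches one that starts at or beyond the limit, munlocking any VM_LOCKED areas and subtracting their pages from locked_vm. The exit_mmap() caller passes ULONG_MAX so the walk covers the whole list. The sketch uses simplified stand-in structs for vm_area_struct/mm_struct, a stubbed munlock_vma_pages_all(), and an assumed 4K page size so it compiles and runs in userspace; the patch itself of course uses the real kernel types and helpers.

/* Userspace illustration of the unlock_range() walk; stand-in types only. */
#include <stdio.h>

#define VM_LOCKED	0x2000UL

struct mm_struct;

struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
	struct vm_area_struct *vm_next;	/* next VMA, sorted by address */
	struct mm_struct *vm_mm;
};

struct mm_struct {
	struct vm_area_struct *mmap;	/* head of the VMA list */
	unsigned long locked_vm;	/* pages accounted as mlock()ed */
};

static unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> 12;	/* assume 4K pages */
}

/* Stub: the real helper tears down the mlock state of every page. */
static void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	printf("munlock [%lx-%lx)\n", vma->vm_start, vma->vm_end);
}

/* Same walk as the patch: stop at the first VMA starting at/after limit. */
static void unlock_range(struct vm_area_struct *start, unsigned long limit)
{
	struct mm_struct *mm = start->vm_mm;
	struct vm_area_struct *tmp = start;

	while (tmp && tmp->vm_start < limit) {
		if (tmp->vm_flags & VM_LOCKED) {
			mm->locked_vm -= vma_pages(tmp);
			munlock_vma_pages_all(tmp);
		}

		tmp = tmp->vm_next;
	}
}

int main(void)
{
	struct mm_struct mm = { .locked_vm = 3 };
	struct vm_area_struct b = { 0x3000, 0x5000, VM_LOCKED, NULL, &mm };
	struct vm_area_struct a = { 0x1000, 0x2000, VM_LOCKED, &b, &mm };

	mm.mmap = &a;

	/* exit_mmap()-style caller: no end address, unlock the whole list. */
	unlock_range(mm.mmap, ~0UL);
	printf("locked_vm now %lu\n", mm.locked_vm);
	return 0;
}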