mm: make the mlock() stack guard page checks stricter
If we've split the stack vma, only the lowest one has the guard page. Now that we have a doubly linked list of vma's, checking this is trivial.

Tested-by: Ian Campbell <ijc@hellion.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 297c5eee37
commit 7798330ac8

mm/mlock.c | 19
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
 	}
 }
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
  * @vma:   target vma
@@ -168,12 +181,10 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		gup_flags |= FOLL_WRITE;
 
 	/* We don't try to access the guard page of a stack vma */
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (start == vma->vm_start) {
-			start += PAGE_SIZE;
-			nr_pages--;
-		}
+	if (stack_guard_page(vma, start)) {
+		addr += PAGE_SIZE;
+		nr_pages--;
 	}
 
 	while (nr_pages > 0) {
 		int i;
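For readers outside the kernel tree, the following is a minimal user-space sketch of the check this patch adds. The struct vma type, its field layout, and the addresses in main() are made up for illustration and are not the kernel's real struct vm_area_struct; the sketch only shows why, once every vma knows its lower neighbour via vm_prev, "am I the lowest piece of the stack?" becomes a single comparison, so an upper piece of a split stack vma no longer skips its first page.

	/*
	 * Illustrative stand-in types only; not the kernel's
	 * struct vm_area_struct or VM_GROWSDOWN definition.
	 */
	#include <stdio.h>

	#define VM_GROWSDOWN	0x1UL

	struct vma {
		unsigned long	vm_start;
		unsigned long	vm_end;
		unsigned long	vm_flags;
		struct vma	*vm_prev;	/* lower neighbour, as in the doubly linked vma list */
	};

	/* Is the vma a continuation of the stack vma above it? */
	static int vma_stack_continue(struct vma *vma, unsigned long addr)
	{
		return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
	}

	/* Only the lowest piece of a (possibly split) stack vma carries the guard page. */
	static int stack_guard_page(struct vma *vma, unsigned long addr)
	{
		return (vma->vm_flags & VM_GROWSDOWN) &&
			(vma->vm_start == addr) &&
			!vma_stack_continue(vma->vm_prev, addr);
	}

	int main(void)
	{
		/* A stack vma split in two: [0x1000, 0x2000) sits below [0x2000, 0x3000). */
		struct vma low  = { 0x1000, 0x2000, VM_GROWSDOWN, NULL };
		struct vma high = { 0x2000, 0x3000, VM_GROWSDOWN, &low };

		printf("low piece has guard page:  %d\n", stack_guard_page(&low,  low.vm_start));  /* prints 1 */
		printf("high piece has guard page: %d\n", stack_guard_page(&high, high.vm_start)); /* prints 0 */
		return 0;
	}

With the old check (VM_GROWSDOWN plus start == vm_start), both pieces in this example would have been treated as starting with a guard page; consulting vm_prev rules out the upper piece because its start address is exactly the end of another growsdown vma.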