hugetlbfs: check for pgoff value overflow
A vma with vm_pgoff large enough to overflow a loff_t type when
converted to a byte offset can be passed via the remap_file_pages
system call.  The hugetlbfs mmap routine uses the byte offset to
calculate reservations and file size.

A sequence such as:

  mmap(0x20a00000, 0x600000, 0, 0x66033, -1, 0);
  remap_file_pages(0x20a00000, 0x600000, 0, 0x20000000000000, 0);

will result in the following when task exits/file closed:

  kernel BUG at mm/hugetlb.c:749!
  Call Trace:
    hugetlbfs_evict_inode+0x2f/0x40
    evict+0xcb/0x190
    __dentry_kill+0xcb/0x150
    __fput+0x164/0x1e0
    task_work_run+0x84/0xa0
    exit_to_usermode_loop+0x7d/0x80
    do_syscall_64+0x18b/0x190
    entry_SYSCALL_64_after_hwframe+0x3d/0xa2

The overflowed pgoff value causes hugetlbfs to try to set up a mapping
with a negative range (end < start) that leaves invalid state which
causes the BUG.

The previous overflow fix to this code was incomplete and did not take
the remap_file_pages system call into account.

[mike.kravetz@oracle.com: v3]
  Link: http://lkml.kernel.org/r/20180309002726.7248-1-mike.kravetz@oracle.com
[akpm@linux-foundation.org: include mmdebug.h]
[akpm@linux-foundation.org: fix -ve left shift count on sh]
Link: http://lkml.kernel.org/r/20180308210502.15952-1-mike.kravetz@oracle.com
Fixes: 045c7a3f53d9 ("hugetlbfs: fix offset overflow in hugetlbfs mmap")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reported-by: Nic Losby <blurbdust@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Yisheng Xie <xieyisheng1@huawei.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2e517d6816
commit 63489f8e82
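For reference, the snippet below is a minimal userspace sketch of the reproducer
sequence quoted in the log above.  It is a hypothetical reconstruction, not the
reporter's original program: the raw mmap flag value 0x66033 and the
remap_file_pages arguments are passed through verbatim rather than decoded into
symbolic names, and error handling is kept to a minimum.  On an unpatched
kernel the BUG fires when the process exits and the backing hugetlbfs inode is
evicted.

	/* Hypothetical reproducer sketch based on the sequence in the commit
	 * log.  Assumes an x86_64 system with hugetlbfs huge pages available.
	 */
	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		/* Anonymous MAP_HUGETLB mapping at a fixed address, flags taken
		 * verbatim from the log. */
		void *p = mmap((void *)0x20a00000, 0x600000, 0, 0x66033, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		/* pgoff 0x20000000000000 pages overflows loff_t once converted
		 * to a byte offset; the pre-patch check did not reject it. */
		syscall(SYS_remap_file_pages, 0x20a00000UL, 0x600000UL, 0UL,
			0x20000000000000UL, 0UL);

		/* On unpatched kernels the BUG triggers at exit/file close. */
		return 0;
	}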
fs/hugetlbfs/inode.c:

@@ -108,6 +108,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
 	pagevec_reinit(pvec);
 }
 
+/*
+ * Mask used when checking the page offset value passed in via system
+ * calls. This value will be converted to a loff_t which is signed.
+ * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
+ * value. The extra bit (- 1 in the shift value) is to take the sign
+ * bit into account.
+ */
+#define PGOFF_LOFFT_MAX \
+	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
+
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file_inode(file);
@@ -127,12 +137,13 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &hugetlb_vm_ops;
 
 	/*
-	 * Offset passed to mmap (before page shift) could have been
-	 * negative when represented as a (l)off_t.
+	 * page based offset in vm_pgoff could be sufficiently large to
+	 * overflow a (l)off_t when converted to byte offset.
 	 */
-	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+	if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
 		return -EINVAL;
 
 	/* must be huge page aligned */
 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
 		return -EINVAL;
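Why the old sign test misses the reproducer's offset while the new mask catches
it can be replayed in userspace.  The snippet below is an illustrative sketch
only, assuming a 64-bit unsigned long and PAGE_SHIFT == 12 (4 KiB base pages);
it applies both checks to the pgoff value from the reproducer
(0x20000000000000, i.e. 2^53 pages).

	/* Illustrative userspace model of the two overflow checks; assumes
	 * 64-bit unsigned long and PAGE_SHIFT == 12.  Not kernel code.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define BITS_PER_LONG	64

	/* Same expression as the kernel macro added by this patch: a mask of
	 * the upper PAGE_SHIFT + 1 bits of an unsigned long (the extra bit
	 * covers the loff_t sign bit). */
	#define PGOFF_LOFFT_MAX \
		(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))

	int main(void)
	{
		unsigned long pgoff = 0x20000000000000UL; /* 2^53 pages */

		/* Old check: shift first, then test the sign.  2^53 << 12 is
		 * 2^65, which wraps to 0 in 64 bits, so the result is not
		 * negative and the bogus offset slips through. */
		long old_check = (long)(pgoff << PAGE_SHIFT);
		printf("old check: byte offset = %ld -> %s\n", old_check,
		       old_check < 0 ? "rejected" : "NOT rejected");

		/* New check: test the high bits of pgoff before shifting.
		 * Bit 53 lies inside the top PAGE_SHIFT + 1 = 13 bits, so the
		 * value is rejected. */
		printf("new check: pgoff & PGOFF_LOFFT_MAX = %#lx -> %s\n",
		       pgoff & PGOFF_LOFFT_MAX,
		       (pgoff & PGOFF_LOFFT_MAX) ? "rejected" : "NOT rejected");

		return 0;
	}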
mm/hugetlb.c:

@@ -18,6 +18,7 @@
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/mmdebug.h>
 #include <linux/sched/signal.h>
 #include <linux/rmap.h>
 #include <linux/string_helpers.h>
@@ -4374,6 +4375,12 @@ int hugetlb_reserve_pages(struct inode *inode,
 	struct resv_map *resv_map;
 	long gbl_reserve;
 
+	/* This should never happen */
+	if (from > to) {
+		VM_WARN(1, "%s called with a negative range\n", __func__);
+		return -EINVAL;
+	}
+
 	/*
 	 * Only apply hugepage reservation if asked. At fault time, an
 	 * attempt will be made for VM_NORESERVE to allocate a page
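The VM_WARN() guard above covers the case where an overflowed offset has
already slipped past the mmap-time check.  The snippet below is a rough
userspace model of how that produces a negative reservation range (end < start),
under stated assumptions: 64-bit longs, PAGE_SHIFT == 12, 2 MiB huge pages, and
a simplified rendering of the arithmetic done on the hugetlbfs mmap path; it is
an illustration, not the kernel code itself.

	/* Rough model of why hugetlb_reserve_pages() can see from > to.
	 * Assumes PAGE_SHIFT == 12 and 2 MiB huge pages; simplified.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define HPAGE_SHIFT	21			/* 2 MiB huge pages */
	#define HPAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

	int main(void)
	{
		unsigned long pgoff = 0x20000000000000UL; /* overflowed vm_pgoff */
		long long vma_len = 0x600000;		  /* 6 MiB mapping */

		/* Byte length including the offset: the shift wraps past
		 * 64 bits, so the huge offset silently vanishes from 'len'. */
		long long len = vma_len + (long long)(pgoff << PAGE_SHIFT);

		/* Reservation range in huge-page units, roughly as the mmap
		 * path derives it: 'from' keeps the huge pgoff, 'to' comes
		 * from the wrapped length. */
		unsigned long from = pgoff >> HPAGE_ORDER;
		unsigned long to = (unsigned long)(len >> HPAGE_SHIFT);

		printf("from = %#lx, to = %#lx -> %s\n", from, to,
		       from > to ? "negative range, now rejected with -EINVAL"
				 : "sane range");
		return 0;
	}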