RDMA/umem: Fix potential addition overflow
Given a large enough memory allocation, it is possible to wrap the pinned_vm counter. Check for addition overflow to prevent such eventualities.

Fixes: 40ddacf2dda9 ("RDMA/umem: Don't hold mmap_sem for too long")
Reported-by: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 3312d1c6bd
commit c6ce580716
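The root cause: "mm->pinned_vm += npages" is an unchecked unsigned addition, so a large enough npages wraps the sum past ULONG_MAX, the wrapped (now tiny) value passes the rlimit comparison, and the counter is left corrupted. check_add_overflow(), from include/linux/overflow.h, maps to the compiler's __builtin_add_overflow() on modern compilers. A minimal user-space sketch (not the kernel code itself) showing the wrap and its detection, using the builtin directly:

#include <stdio.h>

int main(void)
{
	unsigned long pinned_vm = (unsigned long)-4096;	/* near ULONG_MAX */
	unsigned long npages = 8192;
	unsigned long new_pinned;

	/* Unchecked addition silently wraps; the result looks tiny. */
	printf("wrapped sum: %lu\n", pinned_vm + npages);

	/* Checked addition reports the wrap so the caller can bail out. */
	if (__builtin_add_overflow(pinned_vm, npages, &new_pinned))
		printf("overflow detected, caller would return -ENOMEM\n");
	else
		printf("new_pinned = %lu\n", new_pinned);

	return 0;
}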
drivers/infiniband/core/umem.c

@@ -85,6 +85,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	struct page **page_list;
 	struct vm_area_struct **vma_list;
 	unsigned long lock_limit;
+	unsigned long new_pinned;
 	unsigned long cur_base;
 	struct mm_struct *mm;
 	unsigned long npages;
@@ -160,12 +161,13 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
 	down_write(&mm->mmap_sem);
-	mm->pinned_vm += npages;
-	if ((mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
+	if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
+	    (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
 		up_write(&mm->mmap_sem);
 		ret = -ENOMEM;
-		goto vma;
+		goto out;
 	}
+	mm->pinned_vm = new_pinned;
 	up_write(&mm->mmap_sem);
 
 	cur_base = addr & PAGE_MASK;
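Note the reordering in the second hunk: the old code incremented pinned_vm first and unwound the increment on failure (judging from the hunk, the vma error label had to subtract npages again), while the new code validates into the local new_pinned and writes mm->pinned_vm only after both checks pass, so the failure path can jump straight to out with nothing to undo. A self-contained sketch of that check-then-commit pattern; the names account_pinned and struct accounting are hypothetical stand-ins for mm->pinned_vm guarded by mmap_sem:

#include <errno.h>
#include <stdbool.h>

struct accounting {
	unsigned long pinned;	/* stand-in for mm->pinned_vm */
};

/* Validate first, commit last: no unwind needed on the error path. */
static int account_pinned(struct accounting *acct, unsigned long npages,
			  unsigned long limit, bool can_exceed_limit)
{
	unsigned long new_pinned;

	if (__builtin_add_overflow(acct->pinned, npages, &new_pinned) ||
	    (new_pinned > limit && !can_exceed_limit))
		return -ENOMEM;	/* acct->pinned was never touched */

	acct->pinned = new_pinned;
	return 0;
}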