RDMA/siw: Fix user page pinning accounting
To avoid racing with other user memory reservations, immediately account the full number of pages to be pinned.

Fixes: 2251334dcac9 ("rdma/siw: application buffer management")
Reported-by: Jason Gunthorpe <jgg@nvidia.com>
Suggested-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Link: https://lore.kernel.org/r/20230202101000.402990-1-bmt@zurich.ibm.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent ef42520240
commit 65a8fc30fb
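The fix changes the accounting order: the full request is charged against the pinned-page counter with a single atomic add-and-test before any page is pinned, and whatever ends up not pinned is subtracted again on the error path. Below is a minimal userspace sketch of that pattern using C11 atomics; the names (pinned_vm, mlock_limit, try_pin_pages, pin_with_accounting) and the fake pinning helper are illustrative stand-ins, not the kernel's API.

/* Sketch of "reserve up front, refund what was never pinned" accounting.
 * All names here are hypothetical; only the pattern mirrors the fix.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long pinned_vm;           /* pages currently accounted */
static const long  mlock_limit = 1024;  /* stand-in for RLIMIT_MEMLOCK in pages */

/* Pretend to pin up to nr pages; may pin fewer per call (caps at 100 here). */
static long try_pin_pages(long nr)
{
	return nr < 100 ? nr : 100;
}

static int pin_with_accounting(long num_pages)
{
	/* Reserve the whole request first, so two concurrent callers cannot
	 * both pass the limit check before either updates the counter. */
	if (atomic_fetch_add(&pinned_vm, num_pages) + num_pages > mlock_limit) {
		atomic_fetch_sub(&pinned_vm, num_pages);  /* nothing pinned yet */
		return -1;
	}

	long remaining = num_pages;
	while (remaining) {
		long got = try_pin_pages(remaining);
		if (got <= 0)
			break;                            /* partial failure */
		remaining -= got;
	}

	/* Refund only the pages that were reserved but never pinned. */
	if (remaining)
		atomic_fetch_sub(&pinned_vm, remaining);

	return remaining ? -1 : 0;
}

int main(void)
{
	printf("first request:  %d\n", pin_with_accounting(300));
	printf("pinned_vm now:  %ld\n", atomic_load(&pinned_vm));
	printf("over the limit: %d\n", pin_with_accounting(2000));
	return 0;
}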
@@ -398,7 +398,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 
 	mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
-	if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
+	if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) {
 		rv = -ENOMEM;
 		goto out_sem_up;
 	}
@@ -411,30 +411,27 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 		goto out_sem_up;
 	}
 	for (i = 0; num_pages; i++) {
-		int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);
-
-		umem->page_chunk[i].plist =
+		int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
+		struct page **plist =
 			kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
-		if (!umem->page_chunk[i].plist) {
+
+		if (!plist) {
 			rv = -ENOMEM;
 			goto out_sem_up;
 		}
-		got = 0;
+		umem->page_chunk[i].plist = plist;
 		while (nents) {
-			struct page **plist = &umem->page_chunk[i].plist[got];
-
 			rv = pin_user_pages(first_page_va, nents, foll_flags,
 					    plist, NULL);
 			if (rv < 0)
 				goto out_sem_up;
 
 			umem->num_pages += rv;
-			atomic64_add(rv, &mm_s->pinned_vm);
 			first_page_va += rv * PAGE_SIZE;
+			plist += rv;
 			nents -= rv;
-			got += rv;
+			num_pages -= rv;
 		}
-		num_pages -= got;
 	}
 out_sem_up:
 	mmap_read_unlock(mm_s);
@@ -442,6 +439,10 @@ out_sem_up:
 	if (rv > 0)
 		return umem;
 
+	/* Adjust accounting for pages not pinned */
+	if (num_pages)
+		atomic64_sub(num_pages, &mm_s->pinned_vm);
+
 	siw_umem_release(umem, false);
 
 	return ERR_PTR(rv);