IB/usnic: Update with bug fixes from core code
usnic has a modified version of the core code's ib_umem_get() and related
helpers, and the copy misses many of the bug fixes made over the years:

Commit bc3e53f682 ("mm: distinguish between mlocked and pinned pages")
Commit 87773dd56d ("IB: ib_umem_release() should decrement mm->pinned_vm from ib_umem_get")
Commit 8494057ab5 ("IB/uverbs: Prevent integer overflow in ib_umem_get address arithmetic")
Commit 8abaae62f3 ("IB/core: disallow registering 0-sized memory region")
Commit 66578b0b2f ("IB/core: don't disallow registering region starting at 0x0")
Commit 53376fedb9 ("RDMA/core: not to set page dirty bit if it's already set.")
Commit 8e907ed488 ("IB/umem: Use the correct mm during ib_umem_release")

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 1975acd9f3
commit 43cbd64b1f
@@ -666,7 +666,7 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr)
 
 	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
 
-	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
+	usnic_uiom_reg_release(mr->umem, ibmr->uobject->context);
 	kfree(mr);
 	return 0;
 }
@@ -41,6 +41,7 @@
 #include <linux/workqueue.h>
 #include <linux/list.h>
 #include <linux/pci.h>
+#include <rdma/ib_verbs.h>
 
 #include "usnic_log.h"
 #include "usnic_uiom.h"
@@ -88,7 +89,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
 		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
 			page = sg_page(sg);
 			pa = sg_phys(sg);
-			if (dirty)
+			if (!PageDirty(page) && dirty)
 				set_page_dirty_lock(page);
 			put_page(page);
 			usnic_dbg("pa: %pa\n", &pa);
@@ -114,6 +115,16 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	dma_addr_t pa;
 	unsigned int gup_flags;
 
+	/*
+	 * If the combination of the addr and size requested for this memory
+	 * region causes an integer overflow, return error.
+	 */
+	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
+		return -EINVAL;
+
+	if (!size)
+		return -EINVAL;
+
 	if (!can_do_mlock())
 		return -EPERM;
 
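The two added checks match the core-code fixes cited in the commit message: reject a region whose addr + size wraps around (or whose page-aligned end wraps), and reject a zero-sized region. A minimal userspace sketch of the same arithmetic, assuming a 4 KiB page and a local page_align() helper standing in for the kernel's PAGE_ALIGN macro (names and addresses here are illustrative, not driver code):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE_DEMO 4096UL	/* illustrative; the kernel uses PAGE_SIZE */

/* Round up to the next page boundary, like the kernel's PAGE_ALIGN. */
static unsigned long page_align(unsigned long x)
{
	return (x + PAGE_SIZE_DEMO - 1) & ~(PAGE_SIZE_DEMO - 1);
}

/* Same validation order as the new code in usnic_uiom_get_pages(). */
static int validate_region(unsigned long addr, size_t size)
{
	if (((addr + size) < addr) || page_align(addr + size) < (addr + size))
		return -1;	/* address arithmetic overflowed */
	if (!size)
		return -1;	/* zero-sized region */
	return 0;
}

int main(void)
{
	/* Wraps: addr + size overflows an unsigned long, so it is rejected. */
	printf("%d\n", validate_region(~0UL - 100, 4096));
	/* Fine: small region well below the top of the address space. */
	printf("%d\n", validate_region(0x1000, 4096));
	return 0;
}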
@@ -127,7 +138,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 
 	down_write(&current->mm->mmap_sem);
 
-	locked = npages + current->mm->locked_vm;
+	locked = npages + current->mm->pinned_vm;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
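Per commit bc3e53f682, long-term pins are accounted in mm->pinned_vm rather than mm->locked_vm, but the ceiling checked is still RLIMIT_MEMLOCK converted to pages, as in the rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT line above. A short userspace sketch of how that per-process limit turns into a page count (standard getrlimit(), nothing driver-specific):

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl;
	long page_size = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return 1;

	/* The driver compares (npages + mm->pinned_vm) against this count. */
	unsigned long lock_limit_pages = rl.rlim_cur / page_size;

	printf("RLIMIT_MEMLOCK: %llu bytes = %lu pages of %ld bytes\n",
	       (unsigned long long)rl.rlim_cur, lock_limit_pages, page_size);
	return 0;
}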
@@ -143,7 +154,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	ret = 0;
 
 	while (npages) {
-		ret = get_user_pages(cur_base,
+		ret = get_user_pages_longterm(cur_base,
 					min_t(unsigned long, npages,
 					PAGE_SIZE / sizeof(struct page *)),
 					gup_flags, page_list, NULL);
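get_user_pages_longterm() is the variant intended for pins that outlive the syscall; it rejects mappings, such as filesystem-DAX, that cannot tolerate a long-lived pin. The request is chunked so the struct page pointer array fits in the single page allocated for page_list. A hedged, kernel-context sketch of that chunked pin loop; pin_user_range() is a hypothetical helper name, not a driver symbol, and the caller is assumed to hold mmap_sem as the driver does:

#include <linux/mm.h>		/* get_user_pages_longterm(), PAGE_SIZE */
#include <linux/kernel.h>	/* min_t() */

/* Pin a user range in page-array-sized chunks; returns pages pinned or -errno. */
static long pin_user_range(unsigned long cur_base, unsigned long npages,
			   unsigned int gup_flags, struct page **page_list)
{
	long pinned = 0;

	while (npages) {
		long ret = get_user_pages_longterm(cur_base,
				min_t(unsigned long, npages,
				      PAGE_SIZE / sizeof(struct page *)),
				gup_flags, page_list, NULL);
		if (ret < 0)
			return ret;	/* caller unwinds pages already pinned */
		if (ret == 0)
			break;

		pinned += ret;
		npages -= ret;
		cur_base += ret * PAGE_SIZE;
	}

	return pinned;
}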
@@ -186,7 +197,7 @@ out:
 	if (ret < 0)
 		usnic_uiom_put_pages(chunk_list, 0);
 	else
-		current->mm->locked_vm = locked;
+		current->mm->pinned_vm = locked;
 
 	up_write(&current->mm->mmap_sem);
 	free_page((unsigned long) page_list);
@@ -420,18 +431,22 @@ out_free_uiomr:
 	return ERR_PTR(err);
 }
 
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+			    struct ib_ucontext *ucontext)
 {
+	struct task_struct *task;
 	struct mm_struct *mm;
 	unsigned long diff;
 
 	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
 
-	mm = get_task_mm(current);
-	if (!mm) {
-		kfree(uiomr);
-		return;
-	}
+	task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
+	if (!task)
+		goto out;
+	mm = get_task_mm(task);
+	put_task_struct(task);
+	if (!mm)
+		goto out;
 
 	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
 
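Per commit 8e907ed488, release-time accounting must use the mm that owned the registration, reached through the ucontext's tgid, rather than current->mm, because the final release can run from a different process (for example when the file descriptor is closed on process exit or after being passed to another task). A hedged kernel-context sketch of that lookup pattern; mm_for_ucontext() is a hypothetical helper name, not part of the driver:

#include <linux/pid.h>		/* get_pid_task() */
#include <linux/sched/mm.h>	/* get_task_mm(), mmput() */
#include <rdma/ib_verbs.h>	/* struct ib_ucontext */

/*
 * Resolve the mm_struct of the process that created the ucontext.
 * The caller must mmput() the returned mm when finished with it.
 */
static struct mm_struct *mm_for_ucontext(struct ib_ucontext *ucontext)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
	if (!task)
		return NULL;

	mm = get_task_mm(task);	/* NULL if the task has already dropped its mm */
	put_task_struct(task);
	return mm;
}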
@@ -443,7 +458,7 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
 	 * up here and not be able to take the mmap_sem.  In that case
 	 * we defer the vm_locked accounting to the system workqueue.
 	 */
-	if (closing) {
+	if (ucontext->closing) {
 		if (!down_write_trylock(&mm->mmap_sem)) {
 			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
 			uiomr->mm = mm;
@@ -455,9 +470,10 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
 	} else
 		down_write(&mm->mmap_sem);
 
-	current->mm->locked_vm -= diff;
+	mm->pinned_vm -= diff;
 	up_write(&mm->mmap_sem);
 	mmput(mm);
+out:
 	kfree(uiomr);
 }
 
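As the comment in the hunk above notes, when the ucontext is being torn down the exiting process may already hold mmap_sem, so the pinned_vm adjustment cannot block on it; the code takes the lock with down_write_trylock() and, on failure, defers the adjustment to the system workqueue. A hedged, self-contained sketch of that fall-back pattern; struct pin_accounting, deferred_unaccount() and unaccount_pages() are illustrative names, not driver symbols:

#include <linux/workqueue.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/mm.h>

struct pin_accounting {
	struct work_struct	work;
	struct mm_struct	*mm;
	unsigned long		diff;	/* pages to subtract from pinned_vm */
};

/* Runs later from the system workqueue, where blocking on mmap_sem is safe. */
static void deferred_unaccount(struct work_struct *work)
{
	struct pin_accounting *acct =
		container_of(work, struct pin_accounting, work);

	down_write(&acct->mm->mmap_sem);
	acct->mm->pinned_vm -= acct->diff;
	up_write(&acct->mm->mmap_sem);
	mmput(acct->mm);
	kfree(acct);
}

/* On the closing path: try the lock, otherwise punt to the workqueue. */
static void unaccount_pages(struct pin_accounting *acct, bool closing)
{
	if (closing && !down_write_trylock(&acct->mm->mmap_sem)) {
		INIT_WORK(&acct->work, deferred_unaccount);
		schedule_work(&acct->work);
		return;
	}
	if (!closing)
		down_write(&acct->mm->mmap_sem);

	acct->mm->pinned_vm -= acct->diff;
	up_write(&acct->mm->mmap_sem);
	mmput(acct->mm);
	kfree(acct);
}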
@@ -39,6 +39,8 @@
 
 #include "usnic_uiom_interval_tree.h"
 
+struct ib_ucontext;
+
 #define USNIC_UIOM_READ		(1)
 #define USNIC_UIOM_WRITE	(2)
 
@@ -89,7 +91,8 @@ void usnic_uiom_free_dev_list(struct device **devs);
 struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
 						unsigned long addr, size_t size,
 						int access, int dmasync);
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+			    struct ib_ucontext *ucontext);
 int usnic_uiom_init(char *drv_name);
 void usnic_uiom_fini(void);
 #endif	/* USNIC_UIOM_H_ */