RDMA/rxe: Fix memory leak in error path code
In rxe_mr_init_user(), the third error path fails to free the memory at
mr->map. This patch adds code to do that. This error only occurs if
page_address() fails to return a non-zero address, which should never
happen on 64-bit architectures.
Fixes: 8700e3e7c4 ("Soft RoCE driver")
Link: https://lore.kernel.org/r/20210705164153.17652-1-rpearsonhpe@gmail.com
Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Reviewed-by: Zhu Yanjun <zyjzyj2000@gmail.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit b18c7da63f (parent c9538831b3)
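
For readers unfamiliar with the pattern, below is a minimal userspace sketch of the
goto-unwind error handling this patch switches to. The names (demo_mr, demo_mr_init)
and the malloc/calloc stand-ins are hypothetical and only illustrate the structure,
not the driver's actual API: each failure jumps to a label that releases exactly what
has been set up so far, so an error taken after the map array is allocated also frees
that array instead of leaking it.

#include <stdlib.h>

struct demo_mr {
	void *umem;	/* stands in for the pinned user memory (ib_umem) */
	void **map;	/* array of per-chunk buffers, like mr->map */
	int num_map;
};

int demo_mr_init(struct demo_mr *mr, int num_map, size_t chunk)
{
	int err;
	int i;

	mr->umem = malloc(chunk);	/* stands in for ib_umem_get() */
	if (!mr->umem) {
		err = -1;
		goto err_out;
	}

	mr->num_map = num_map;
	mr->map = calloc(num_map, sizeof(*mr->map));	/* zeroed, so free(NULL) below is safe */
	if (!mr->map) {
		err = -1;
		goto err_release_umem;
	}

	for (i = 0; i < num_map; i++) {
		mr->map[i] = malloc(chunk);	/* like the per-map allocations in rxe_mr_alloc() */
		if (!mr->map[i]) {
			err = -1;
			/* error after the map array exists: the array itself must be
			 * freed too, which is the leak the patch fixes in the driver
			 */
			goto err_cleanup_map;
		}
	}

	return 0;

err_cleanup_map:
	for (i = 0; i < mr->num_map; i++)
		free(mr->map[i]);
	free(mr->map);
err_release_umem:
	free(mr->umem);
err_out:
	return err;
}

Releasing resources at labels in reverse order of acquisition keeps each error site down
to a single goto and avoids duplicating cleanup calls (such as ib_umem_release()) at every
failure point, which is exactly the restructuring the diff below performs.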
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	int			num_buf;
 	void			*vaddr;
 	int err;
+	int i;
 
 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("err %d from rxe_umem_get\n",
-			(int)PTR_ERR(umem));
+		pr_warn("%s: Unable to pin memory region err = %d\n",
+			__func__, (int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
-		goto err1;
+		goto err_out;
 	}
 
 	mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mr_alloc\n", err);
-		ib_umem_release(umem);
-		goto err1;
+		pr_warn("%s: Unable to allocate memory for map\n",
+				__func__);
+		goto err_release_umem;
 	}
 
 	mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("null vaddr\n");
-				ib_umem_release(umem);
+				pr_warn("%s: Unable to get virtual address\n",
+						__func__);
 				err = -ENOMEM;
-				goto err1;
+				goto err_cleanup_map;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	return 0;
 
-err1:
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+err_release_umem:
+	ib_umem_release(umem);
+err_out:
 	return err;
 }