Revert "RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow"

commit 4163cb3d1980383220ad7043002b930995dcba33 upstream.

This patch is not the full fix and still produces call traces
during mlx5_ib_dereg_mr().

This reverts commit f0ae4afe3d35e67db042c58a52909e06262b740f.

Fixes: f0ae4afe3d35 ("RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow")
Link: https://lore.kernel.org/r/20211222101312.1358616-1-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Acked-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
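
Editor's note: the crux of the revert is that the umem pointer must live
outside the anonymous union so it can discriminate kernel MRs from user MRs
before any union member is read. Below is a minimal, self-contained sketch of
that hazard; "fake_mr" and "free_priv_descs" are invented names for
illustration, not the driver's actual types.

#include <stdio.h>

struct fake_mr {
	void *umem;	/* NULL for kernel MRs, set for user MRs */
	union {
		struct {		/* kernel-MR view */
			void *descs;
		};
		struct {		/* user-MR view */
			unsigned int page_shift;
			int access_flags;
		};
	};
};

static void free_priv_descs(struct fake_mr *mr)
{
	/* Mirrors the restored guard: only kernel MRs own descs */
	if (!mr->umem && mr->descs) {
		printf("kernel MR: freeing descs at %p\n", mr->descs);
		mr->descs = NULL;
	}
}

int main(void)
{
	struct fake_mr user_mr = { .umem = (void *)0x1 };

	/* Writing a user-MR field reuses the same bytes as descs... */
	user_mr.page_shift = 12;

	/*
	 * ...so descs now reads as nonzero garbage. Without the
	 * !mr->umem test it would be "freed"; with the test, the
	 * call is a harmless no-op for user MRs.
	 */
	free_priv_descs(&user_mr);
	return 0;
}

Built with any C11 compiler, the program prints nothing: the !mr->umem test
skips the user MR even though its union bytes make descs read as nonzero.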
---
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  6 +++---
 drivers/infiniband/hw/mlx5/mr.c      | 26 ++++++++++++++------------
 2 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -641,6 +641,7 @@ struct mlx5_ib_mr {
 
 	/* User MR data */
 	struct mlx5_cache_ent *cache_ent;
+	struct ib_umem *umem;
 
 	/* This is zero'd when the MR is allocated */
 	union {
@@ -652,7 +653,7 @@ struct mlx5_ib_mr {
 			struct list_head list;
 		};
 
-		/* Used only by kernel MRs */
+		/* Used only by kernel MRs (umem == NULL) */
 		struct {
 			void *descs;
 			void *descs_alloc;
@@ -674,9 +675,8 @@ struct mlx5_ib_mr {
 			int data_length;
 		};
 
-		/* Used only by User MRs */
+		/* Used only by User MRs (umem != NULL) */
 		struct {
-			struct ib_umem *umem;
 			unsigned int page_shift;
 			/* Current access_flags */
 			int access_flags;
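
Net effect of these header hunks: umem moves back out of the union to sit
beside cache_ent, and the union comments again record the discriminating
invariant (umem == NULL selects the kernel-MR view, umem != NULL the
user-MR view).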

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1911,18 +1911,19 @@ err:
 	return ret;
 }
 
-static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 {
-	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-	int size = mr->max_descs * mr->desc_size;
-
-	if (!mr->descs)
-		return;
+	if (!mr->umem && mr->descs) {
+		struct ib_device *device = mr->ibmr.device;
+		int size = mr->max_descs * mr->desc_size;
+		struct mlx5_ib_dev *dev = to_mdev(device);
 
-	dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
-			 DMA_TO_DEVICE);
-	kfree(mr->descs_alloc);
-	mr->descs = NULL;
+		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+				 DMA_TO_DEVICE);
+		kfree(mr->descs_alloc);
+		mr->descs = NULL;
+	}
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
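
For readability, the restored helper reassembled from the hunk above (the net
result after applying the revert):

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (!mr->umem && mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;
		struct mlx5_ib_dev *dev = to_mdev(device);

		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
				 DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

Keeping the guard inside the helper lets every caller invoke it
unconditionally, which is why the !udata test in the dereg path below goes
away.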
@@ -1998,8 +1999,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	if (mr->cache_ent) {
 		mlx5_mr_cache_free(dev, mr);
 	} else {
-		if (!udata)
-			mlx5_free_priv_descs(mr);
+		mlx5_free_priv_descs(mr);
 		kfree(mr);
 	}
 	return 0;
@@ -2086,6 +2086,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
 	if (err)
 		goto err_free_in;
 
+	mr->umem = NULL;
 	kfree(in);
 
 	return mr;
@@ -2212,6 +2213,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
 	}
 
 	mr->ibmr.device = pd->device;
+	mr->umem = NULL;
 
 	switch (mr_type) {
 	case IB_MR_TYPE_MEM_REG:
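
The two restored mr->umem = NULL; assignments re-establish the invariant the
helper's guard depends on: the kernel-MR allocation paths
(mlx5_ib_alloc_pi_mr() and __mlx5_ib_alloc_mr()) clear umem explicitly, so
umem == NULL reliably identifies a kernel MR at dereg time.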