vdpa/mlx5: implement .reset_map driver op
Since commit 6f5312f80183 ("vdpa/mlx5: Add support for running with virtio_vdpa"), mlx5_vdpa starts by preallocating a 1:1 DMA MR at device creation time. This 1:1 DMA MR is implicitly destroyed when the first .set_map call is invoked, at which point callers like vhost-vdpa start to set up custom mappings. When the .reset callback is invoked, the custom mappings are cleared and the 1:1 DMA MR is re-created.

In order to reduce excessive memory mapping cost during live migration, it is desirable to decouple the vhost-vdpa IOTLB abstraction from the virtio device life cycle, i.e. mappings can be kept intact across virtio device reset. Leverage the .reset_map callback, which is meant to destroy the regular MR (including the cvq mapping) on the given ASID and recreate the initial DMA mapping. That way, the device .reset op runs free from having to maintain and clean up memory mappings by itself.

Additionally, implement .compat_reset to cater to older userspace, which may wish to see the mappings cleared during reset.

Co-developed-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
Message-Id: <1697880319-4937-7-git-send-email-si-wei.liu@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
commit 2eacf4b5e3 (parent bc91df5c70)
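Below is a minimal illustrative sketch, not code from this commit or from the vdpa core, of how a caller such as vhost-vdpa is expected to combine the two ops introduced here: .compat_reset is used when the mappings must also be cleaned, otherwise a plain .reset leaves them intact and .reset_map handles per-ASID teardown. The helper name example_reset() and its clean_map_on_reset parameter are hypothetical.

#include <linux/vdpa.h>

/* Hypothetical caller-side helper, for illustration only. */
static int example_reset(struct vdpa_device *vdev, bool clean_map_on_reset)
{
        const struct vdpa_config_ops *ops = vdev->config;
        u32 flags = clean_map_on_reset ? VDPA_RESET_F_CLEAN_MAP : 0;

        /* Older userspace still expects reset to wipe the mappings, so ask
         * the driver to clean them via .compat_reset. */
        if (flags && ops->compat_reset)
                return ops->compat_reset(vdev, flags);

        /* Otherwise mappings stay intact across virtio reset; they are only
         * torn down per ASID when .reset_map is invoked. */
        return ops->reset(vdev);
}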
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -127,6 +127,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
                                struct vhost_iotlb *iotlb,
                                unsigned int asid);
 int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);
+int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
 
 #define mlx5_vdpa_warn(__dev, format, ...) \
         dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
@@ -645,3 +645,20 @@ int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
 
         return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
 }
+
+int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+{
+        if (asid >= MLX5_VDPA_NUM_AS)
+                return -EINVAL;
+
+        mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]);
+
+        if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+                if (mlx5_vdpa_create_dma_mr(mvdev))
+                        mlx5_vdpa_warn(mvdev, "create DMA MR failed\n");
+        } else {
+                mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, asid);
+        }
+
+        return 0;
+}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2876,7 +2876,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
                 mvdev->group2asid[i] = 0;
 }
 
-static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
 {
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
         struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -2888,6 +2888,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
         unregister_link_notifier(ndev);
         teardown_driver(ndev);
         clear_vqs_ready(ndev);
+        if (flags & VDPA_RESET_F_CLEAN_MAP)
                 mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
         ndev->mvdev.status = 0;
         ndev->mvdev.suspended = false;
@@ -2899,7 +2900,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
         init_group_to_asid_map(mvdev);
         ++mvdev->generation;
 
-        if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+        if ((flags & VDPA_RESET_F_CLEAN_MAP) &&
+            MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
                 if (mlx5_vdpa_create_dma_mr(mvdev))
                         mlx5_vdpa_warn(mvdev, "create MR failed\n");
         }
@@ -2908,6 +2910,11 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
         return 0;
 }
 
+static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+{
+        return mlx5_vdpa_compat_reset(vdev, 0);
+}
+
 static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
 {
         return sizeof(struct virtio_net_config);
@@ -2987,6 +2994,18 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
         return err;
 }
 
+static int mlx5_vdpa_reset_map(struct vdpa_device *vdev, unsigned int asid)
+{
+        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+        int err;
+
+        down_write(&ndev->reslock);
+        err = mlx5_vdpa_reset_mr(mvdev, asid);
+        up_write(&ndev->reslock);
+        return err;
+}
+
 static struct device *mlx5_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx)
 {
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -3250,11 +3269,13 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
         .get_status = mlx5_vdpa_get_status,
         .set_status = mlx5_vdpa_set_status,
         .reset = mlx5_vdpa_reset,
+        .compat_reset = mlx5_vdpa_compat_reset,
         .get_config_size = mlx5_vdpa_get_config_size,
         .get_config = mlx5_vdpa_get_config,
         .set_config = mlx5_vdpa_set_config,
         .get_generation = mlx5_vdpa_get_generation,
         .set_map = mlx5_vdpa_set_map,
+        .reset_map = mlx5_vdpa_reset_map,
         .set_group_asid = mlx5_set_group_asid,
         .get_vq_dma_dev = mlx5_get_vq_dma_dev,
         .free = mlx5_vdpa_free,
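For completeness, here is a hedged userspace-side sketch of how newer userspace could opt into persistent mappings on a vhost-vdpa device; userspace that does not negotiate the bit keeps the old behavior, i.e. the driver clears the mappings during reset via .compat_reset with VDPA_RESET_F_CLEAN_MAP. The function name negotiate_iotlb_persist() is made up, and the fallback definition of VHOST_BACKEND_F_IOTLB_PERSIST is an assumption for uapi headers that predate this series.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/vhost_types.h>

#ifndef VHOST_BACKEND_F_IOTLB_PERSIST
#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8   /* assumption: bit number used by this series */
#endif

/* Returns 1 if persistent mappings were negotiated, 0 if not, -1 on error. */
static int negotiate_iotlb_persist(int vhost_vdpa_fd)
{
        uint64_t features = 0;

        if (ioctl(vhost_vdpa_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
                return -1;

        if (!(features & (1ULL << VHOST_BACKEND_F_IOTLB_PERSIST)))
                return 0;   /* older kernel/driver: reset still clears the mappings */

        /* Ack only the bits this sketch understands, keeping IOTLB_PERSIST set. */
        features &= (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
                    (1ULL << VHOST_BACKEND_F_IOTLB_PERSIST);
        if (ioctl(vhost_vdpa_fd, VHOST_SET_BACKEND_FEATURES, &features) < 0)
                return -1;

        return 1;
}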