vfio/ap: Use the new device life cycle helpers

and manage available_instances inside @init/@release.

Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Tony Krowiak <akrowiak@linux.ibm.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20220921104401.38898-10-kevin.tian@intel.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Author: Yi Liu <yi.l.liu@intel.com>
Date: 2022-09-21 18:43:55 +08:00
Committed by: Alex Williamson
Parent: a5ddd2a99a
Commit: 7cb5a82eb1
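
For context, here is a minimal, hedged sketch of the life cycle pattern this patch converts vfio-ap to: the driver embeds a struct vfio_device, allocates the containing structure with vfio_alloc_device() (which invokes the @init callback), and drops the final reference with vfio_put_device() (which invokes @release). The vfio_* helpers named below are the ones visible in the diff; the my_* structure and function names are illustrative only and not part of this patch.

#include <linux/err.h>
#include <linux/mdev.h>
#include <linux/vfio.h>

/* Illustrative driver state; the embedded vfio_device is kept as the
 * first member, mirroring the vdev field of ap_matrix_mdev.
 */
struct my_mdev_state {
	struct vfio_device vdev;
	/* driver-private state follows */
};

/* @init: called from vfio_alloc_device(); on failure the core frees
 * the allocation, so nothing set up here may leak.
 */
static int my_init(struct vfio_device *vdev)
{
	struct my_mdev_state *st =
		container_of(vdev, struct my_mdev_state, vdev);

	/* one-time setup of driver state (counters, hash tables, ...) */
	return 0;
}

/* @release: runs when the last reference is dropped via vfio_put_device();
 * it undoes @init and finally frees the device memory.
 */
static void my_release(struct vfio_device *vdev)
{
	vfio_free_device(vdev);
}

static const struct vfio_device_ops my_ops = {
	.init = my_init,
	.release = my_release,
};

static int my_probe(struct mdev_device *mdev)
{
	struct my_mdev_state *st;
	int ret;

	/* allocates struct my_mdev_state and calls my_init() */
	st = vfio_alloc_device(my_mdev_state, vdev, &mdev->dev, &my_ops);
	if (IS_ERR(st))
		return PTR_ERR(st);

	ret = vfio_register_emulated_iommu_dev(&st->vdev);
	if (ret) {
		vfio_put_device(&st->vdev);	/* my_release() runs here */
		return ret;
	}
	dev_set_drvdata(&mdev->dev, st);
	return 0;
}

static void my_remove(struct mdev_device *mdev)
{
	struct my_mdev_state *st = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&st->vdev);
	vfio_put_device(&st->vdev);	/* drop the last reference */
}

The hunks below apply exactly this conversion to vfio_ap_mdev_probe()/vfio_ap_mdev_remove(), and additionally move the available_instances accounting so the decrement happens in @init and the matching increment in @release.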


@@ -684,42 +684,44 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
 			       AP_DOMAINS);
 }
 
-static int vfio_ap_mdev_probe(struct mdev_device *mdev)
+static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
 {
-	struct ap_matrix_mdev *matrix_mdev;
-	int ret;
+	struct ap_matrix_mdev *matrix_mdev =
+		container_of(vdev, struct ap_matrix_mdev, vdev);
 
 	if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
 		return -EPERM;
 
-	matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
-	if (!matrix_mdev) {
-		ret = -ENOMEM;
-		goto err_dec_available;
-	}
-	vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev,
-			    &vfio_ap_matrix_dev_ops);
-
-	matrix_mdev->mdev = mdev;
+	matrix_mdev->mdev = to_mdev_device(vdev->dev);
 	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
 	matrix_mdev->pqap_hook = handle_pqap;
 	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
 	hash_init(matrix_mdev->qtable.queues);
 
+	return 0;
+}
+
+static int vfio_ap_mdev_probe(struct mdev_device *mdev)
+{
+	struct ap_matrix_mdev *matrix_mdev;
+	int ret;
+
+	matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
+					&vfio_ap_matrix_dev_ops);
+	if (IS_ERR(matrix_mdev))
+		return PTR_ERR(matrix_mdev);
+
 	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
 	if (ret)
-		goto err_list;
+		goto err_put_vdev;
 	dev_set_drvdata(&mdev->dev, matrix_mdev);
 	mutex_lock(&matrix_dev->mdevs_lock);
 	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
 	mutex_unlock(&matrix_dev->mdevs_lock);
 	return 0;
 
-err_list:
-	vfio_uninit_group_dev(&matrix_mdev->vdev);
-	kfree(matrix_mdev);
-err_dec_available:
-	atomic_inc(&matrix_dev->available_instances);
+err_put_vdev:
+	vfio_put_device(&matrix_mdev->vdev);
 	return ret;
 }
@@ -766,6 +768,12 @@ static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
 	}
 }
 
+static void vfio_ap_mdev_release_dev(struct vfio_device *vdev)
+{
+	atomic_inc(&matrix_dev->available_instances);
+	vfio_free_device(vdev);
+}
+
 static void vfio_ap_mdev_remove(struct mdev_device *mdev)
 {
 	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
@@ -779,9 +787,7 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
 	list_del(&matrix_mdev->node);
 	mutex_unlock(&matrix_dev->mdevs_lock);
 	mutex_unlock(&matrix_dev->guests_lock);
-	vfio_uninit_group_dev(&matrix_mdev->vdev);
-	kfree(matrix_mdev);
-	atomic_inc(&matrix_dev->available_instances);
+	vfio_put_device(&matrix_mdev->vdev);
 }
 
 static ssize_t name_show(struct mdev_type *mtype,
@@ -1794,6 +1800,8 @@ static const struct attribute_group vfio_queue_attr_group = {
 };
 
 static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
+	.init = vfio_ap_mdev_init_dev,
+	.release = vfio_ap_mdev_release_dev,
 	.open_device = vfio_ap_mdev_open_device,
 	.close_device = vfio_ap_mdev_close_device,
 	.ioctl = vfio_ap_mdev_ioctl,