vfio-iommufd: Support iommufd for physical VFIO devices
This creates the iommufd_device for the physical VFIO drivers. These are all the drivers that are calling vfio_register_group_dev() and expect the type1 code to set up a real iommu_domain against their parent struct device. The design gives the driver a choice in how it gets connected to iommufd by providing bind_iommufd/unbind_iommufd/attach_ioas callbacks to implement as required. The core code provides three default callbacks for physical mode using a real iommu_domain. This is suitable for drivers using vfio_register_group_dev(). Link: https://lore.kernel.org/r/6-v4-42cd2eb0e3eb+335a-vfio_iommufd_jgg@nvidia.com Reviewed-by: Kevin Tian <kevin.tian@intel.com> Reviewed-by: Alex Williamson <alex.williamson@redhat.com> Tested-by: Alex Williamson <alex.williamson@redhat.com> Tested-by: Nicolin Chen <nicolinc@nvidia.com> Tested-by: Yi Liu <yi.l.liu@intel.com> Tested-by: Lixiao Yang <lixiao.yang@intel.com> Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> Tested-by: Yu He <yu.he@intel.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
2a3dab19a0
commit
a4d1f91db5
@ -6,6 +6,7 @@ obj-$(CONFIG_VFIO) += vfio.o
|
||||
vfio-y += vfio_main.o \
|
||||
iova_bitmap.o \
|
||||
container.o
|
||||
vfio-$(CONFIG_IOMMUFD) += iommufd.o
|
||||
|
||||
obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o
|
||||
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
|
||||
|
@ -592,6 +592,9 @@ static const struct vfio_device_ops vfio_fsl_mc_ops = {
|
||||
.read = vfio_fsl_mc_read,
|
||||
.write = vfio_fsl_mc_write,
|
||||
.mmap = vfio_fsl_mc_mmap,
|
||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||
};
|
||||
|
||||
static struct fsl_mc_driver vfio_fsl_mc_driver = {
|
||||
|
100
drivers/vfio/iommufd.c
Normal file
100
drivers/vfio/iommufd.c
Normal file
@ -0,0 +1,100 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
|
||||
*/
|
||||
#include <linux/vfio.h>
|
||||
#include <linux/iommufd.h>
|
||||
|
||||
#include "vfio.h"
|
||||
|
||||
MODULE_IMPORT_NS(IOMMUFD);
|
||||
MODULE_IMPORT_NS(IOMMUFD_VFIO);
|
||||
|
||||
int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
|
||||
{
|
||||
u32 ioas_id;
|
||||
u32 device_id;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&vdev->dev_set->lock);
|
||||
|
||||
/*
|
||||
* If the driver doesn't provide this op then it means the device does
|
||||
* not do DMA at all. So nothing to do.
|
||||
*/
|
||||
if (!vdev->ops->bind_iommufd)
|
||||
return 0;
|
||||
|
||||
ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = iommufd_vfio_compat_ioas_id(ictx, &ioas_id);
|
||||
if (ret)
|
||||
goto err_unbind;
|
||||
ret = vdev->ops->attach_ioas(vdev, &ioas_id);
|
||||
if (ret)
|
||||
goto err_unbind;
|
||||
|
||||
/*
|
||||
* The legacy path has no way to return the device id or the selected
|
||||
* pt_id
|
||||
*/
|
||||
return 0;
|
||||
|
||||
err_unbind:
|
||||
if (vdev->ops->unbind_iommufd)
|
||||
vdev->ops->unbind_iommufd(vdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Undo vfio_iommufd_bind() for the legacy group path.  Safe to call even for
 * drivers that never provided an unbind_iommufd op (non-DMA devices).
 * Caller holds the dev_set lock.
 */
void vfio_iommufd_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
}
|
||||
|
||||
/*
|
||||
* The physical standard ops mean that the iommufd_device is bound to the
|
||||
* physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
|
||||
* using this ops set should call vfio_register_group_dev()
|
||||
*/
|
||||
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
|
||||
struct iommufd_ctx *ictx, u32 *out_device_id)
|
||||
{
|
||||
struct iommufd_device *idev;
|
||||
|
||||
idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
|
||||
if (IS_ERR(idev))
|
||||
return PTR_ERR(idev);
|
||||
vdev->iommufd_device = idev;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);
|
||||
|
||||
/*
 * Standard unbind for physical devices: detach from the IOAS first (if an
 * attach succeeded earlier), then release the iommufd_device binding.
 * Caller holds the dev_set lock.
 */
void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);
|
||||
|
||||
/*
 * Standard attach for physical devices: attach the bound iommufd_device to
 * the page table identified by *pt_id and record that an attach is active so
 * vfio_iommufd_physical_unbind() knows to detach.
 */
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int ret = iommufd_device_attach(vdev->iommufd_device, pt_id);

	if (ret)
		return ret;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
|
@ -1246,6 +1246,9 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
|
||||
.mmap = hisi_acc_vfio_pci_mmap,
|
||||
.request = vfio_pci_core_request,
|
||||
.match = vfio_pci_core_match,
|
||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||
};
|
||||
|
||||
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
|
||||
@ -1261,6 +1264,9 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
|
||||
.mmap = vfio_pci_core_mmap,
|
||||
.request = vfio_pci_core_request,
|
||||
.match = vfio_pci_core_match,
|
||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||
};
|
||||
|
||||
static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
|
@ -623,6 +623,9 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
|
||||
.mmap = vfio_pci_core_mmap,
|
||||
.request = vfio_pci_core_request,
|
||||
.match = vfio_pci_core_match,
|
||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||
};
|
||||
|
||||
static int mlx5vf_pci_probe(struct pci_dev *pdev,
|
||||
|
@ -138,6 +138,9 @@ static const struct vfio_device_ops vfio_pci_ops = {
|
||||
.mmap = vfio_pci_core_mmap,
|
||||
.request = vfio_pci_core_request,
|
||||
.match = vfio_pci_core_match,
|
||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||
};
|
||||
|
||||
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
|
@ -117,6 +117,9 @@ static const struct vfio_device_ops vfio_amba_ops = {
|
||||
.read = vfio_platform_read,
|
||||
.write = vfio_platform_write,
|
||||
.mmap = vfio_platform_mmap,
|
||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||
};
|
||||
|
||||
static const struct amba_id pl330_ids[] = {
|
||||
|
@ -106,6 +106,9 @@ static const struct vfio_device_ops vfio_platform_ops = {
|
||||
.read = vfio_platform_read,
|
||||
.write = vfio_platform_write,
|
||||
.mmap = vfio_platform_mmap,
|
||||
.bind_iommufd = vfio_iommufd_physical_bind,
|
||||
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
||||
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
||||
};
|
||||
|
||||
static struct platform_driver vfio_platform_driver = {
|
||||
|
@ -124,6 +124,21 @@ void vfio_device_container_unregister(struct vfio_device *device);
|
||||
int __init vfio_container_init(void);
|
||||
void vfio_container_cleanup(void);
|
||||
|
||||
/*
 * Legacy-group glue into the iommufd code.  When CONFIG_IOMMUFD is disabled
 * the stubs make vfio_device_first_open() fail the iommufd path with
 * -EOPNOTSUPP and make unbind a no-op.
 */
#if IS_ENABLED(CONFIG_IOMMUFD)
int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
void vfio_iommufd_unbind(struct vfio_device *device);
#else
static inline int vfio_iommufd_bind(struct vfio_device *device,
				    struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}

static inline void vfio_iommufd_unbind(struct vfio_device *device)
{
}
#endif
|
||||
|
||||
#ifdef CONFIG_VFIO_NOIOMMU
|
||||
extern bool vfio_noiommu __read_mostly;
|
||||
#else
|
||||
|
@ -525,6 +525,11 @@ static int __vfio_register_dev(struct vfio_device *device,
|
||||
if (IS_ERR(group))
|
||||
return PTR_ERR(group);
|
||||
|
||||
if (WARN_ON(device->ops->bind_iommufd &&
|
||||
(!device->ops->unbind_iommufd ||
|
||||
!device->ops->attach_ioas)))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* If the driver doesn't specify a set then the device is added to a
|
||||
* singleton set just for itself.
|
||||
@ -794,6 +799,10 @@ static int vfio_device_first_open(struct vfio_device *device)
|
||||
ret = vfio_group_use_container(device->group);
|
||||
if (ret)
|
||||
goto err_module_put;
|
||||
} else if (device->group->iommufd) {
|
||||
ret = vfio_iommufd_bind(device, device->group->iommufd);
|
||||
if (ret)
|
||||
goto err_module_put;
|
||||
}
|
||||
|
||||
device->kvm = device->group->kvm;
|
||||
@ -811,6 +820,8 @@ err_container:
|
||||
device->kvm = NULL;
|
||||
if (device->group->container)
|
||||
vfio_group_unuse_container(device->group);
|
||||
else if (device->group->iommufd)
|
||||
vfio_iommufd_unbind(device);
|
||||
err_module_put:
|
||||
mutex_unlock(&device->group->group_lock);
|
||||
module_put(device->dev->driver->owner);
|
||||
@ -829,6 +840,8 @@ static void vfio_device_last_close(struct vfio_device *device)
|
||||
device->kvm = NULL;
|
||||
if (device->group->container)
|
||||
vfio_group_unuse_container(device->group);
|
||||
else if (device->group->iommufd)
|
||||
vfio_iommufd_unbind(device);
|
||||
mutex_unlock(&device->group->group_lock);
|
||||
module_put(device->dev->driver->owner);
|
||||
}
|
||||
@ -1936,8 +1949,6 @@ static void __exit vfio_cleanup(void)
|
||||
module_init(vfio_init);
|
||||
module_exit(vfio_cleanup);
|
||||
|
||||
MODULE_IMPORT_NS(IOMMUFD);
|
||||
MODULE_IMPORT_NS(IOMMUFD_VFIO);
|
||||
MODULE_VERSION(DRIVER_VERSION);
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_AUTHOR(DRIVER_AUTHOR);
|
||||
|
@ -17,6 +17,8 @@
|
||||
#include <linux/iova_bitmap.h>
|
||||
|
||||
struct kvm;
|
||||
struct iommufd_ctx;
|
||||
struct iommufd_device;
|
||||
|
||||
/*
|
||||
* VFIO devices can be placed in a set, this allows all devices to share this
|
||||
@ -54,6 +56,10 @@ struct vfio_device {
|
||||
struct completion comp;
|
||||
struct list_head group_next;
|
||||
struct list_head iommu_entry;
|
||||
#if IS_ENABLED(CONFIG_IOMMUFD)
|
||||
struct iommufd_device *iommufd_device;
|
||||
bool iommufd_attached;
|
||||
#endif
|
||||
};
|
||||
|
||||
/**
|
||||
@ -80,6 +86,10 @@ struct vfio_device_ops {
|
||||
char *name;
|
||||
int (*init)(struct vfio_device *vdev);
|
||||
void (*release)(struct vfio_device *vdev);
|
||||
int (*bind_iommufd)(struct vfio_device *vdev,
|
||||
struct iommufd_ctx *ictx, u32 *out_device_id);
|
||||
void (*unbind_iommufd)(struct vfio_device *vdev);
|
||||
int (*attach_ioas)(struct vfio_device *vdev, u32 *pt_id);
|
||||
int (*open_device)(struct vfio_device *vdev);
|
||||
void (*close_device)(struct vfio_device *vdev);
|
||||
ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
|
||||
@ -96,6 +106,21 @@ struct vfio_device_ops {
|
||||
void __user *arg, size_t argsz);
|
||||
};
|
||||
|
||||
/*
 * Default vfio_device_ops callbacks for physical devices.  When
 * CONFIG_IOMMUFD is disabled the names expand to NULL function pointers cast
 * to the matching prototype, so driver ops initializers still compile and
 * the core sees the ops as absent.
 */
#if IS_ENABLED(CONFIG_IOMMUFD)
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_physical_unbind(struct vfio_device *vdev);
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
#else
#define vfio_iommufd_physical_bind                                      \
	((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx,   \
		  u32 *out_device_id)) NULL)
#define vfio_iommufd_physical_unbind \
	((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_physical_attach_ioas \
	((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
#endif
|
||||
|
||||
/**
|
||||
* @migration_set_state: Optional callback to change the migration state for
|
||||
* devices that support migration. It's mandatory for
|
||||
|
Loading…
x
Reference in New Issue
Block a user