// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"

MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);
2023-07-18 06:55:35 -07:00
bool vfio_iommufd_device_has_compat_ioas ( struct vfio_device * vdev ,
struct iommufd_ctx * ictx )
{
u32 ioas_id ;
return ! iommufd_vfio_compat_ioas_get_id ( ictx , & ioas_id ) ;
}
2023-07-18 06:55:37 -07:00
int vfio_df_iommufd_bind ( struct vfio_device_file * df )
2022-11-29 16:31:51 -04:00
{
2023-07-18 06:55:37 -07:00
struct vfio_device * vdev = df - > device ;
struct iommufd_ctx * ictx = df - > iommufd ;
2023-07-18 06:55:36 -07:00
lockdep_assert_held ( & vdev - > dev_set - > lock ) ;
2023-07-18 06:55:37 -07:00
return vdev - > ops - > bind_iommufd ( vdev , ictx , & df - > devid ) ;
2023-07-18 06:55:36 -07:00
}
int vfio_iommufd_compat_attach_ioas ( struct vfio_device * vdev ,
struct iommufd_ctx * ictx )
{
u32 ioas_id ;
2022-11-29 16:31:51 -04:00
int ret ;
lockdep_assert_held ( & vdev - > dev_set - > lock ) ;
2023-07-18 06:55:36 -07:00
/* compat noiommu does not need to do ioas attach */
if ( vfio_device_is_noiommu ( vdev ) )
return 0 ;
2022-11-29 16:31:51 -04:00
2023-01-18 13:50:28 -04:00
ret = iommufd_vfio_compat_ioas_get_id ( ictx , & ioas_id ) ;
2022-11-29 16:31:51 -04:00
if ( ret )
2023-07-18 06:55:36 -07:00
return ret ;
/* The legacy path has no way to return the selected pt_id */
return vdev - > ops - > attach_ioas ( vdev , & ioas_id ) ;
2022-11-29 16:31:51 -04:00
}
2023-07-18 06:55:37 -07:00
void vfio_df_iommufd_unbind ( struct vfio_device_file * df )
2022-11-29 16:31:51 -04:00
{
2023-07-18 06:55:37 -07:00
struct vfio_device * vdev = df - > device ;
2022-11-29 16:31:51 -04:00
lockdep_assert_held ( & vdev - > dev_set - > lock ) ;
2023-01-18 13:50:28 -04:00
if ( vfio_device_is_noiommu ( vdev ) )
return ;
2022-11-29 16:31:51 -04:00
if ( vdev - > ops - > unbind_iommufd )
vdev - > ops - > unbind_iommufd ( vdev ) ;
}
2023-07-18 03:55:40 -07:00
struct iommufd_ctx * vfio_iommufd_device_ictx ( struct vfio_device * vdev )
{
if ( vdev - > iommufd_device )
return iommufd_device_to_ictx ( vdev - > iommufd_device ) ;
return NULL ;
}
EXPORT_SYMBOL_GPL ( vfio_iommufd_device_ictx ) ;
static int vfio_iommufd_device_id ( struct vfio_device * vdev )
{
if ( vdev - > iommufd_device )
return iommufd_device_to_id ( vdev - > iommufd_device ) ;
return - EINVAL ;
}
/*
* Return devid for a device .
* valid ID for the device that is owned by the ictx
* - ENOENT = device is owned but there is no ID
* - ENODEV or other error = device is not owned
*/
int vfio_iommufd_get_dev_id ( struct vfio_device * vdev , struct iommufd_ctx * ictx )
{
struct iommu_group * group ;
int devid ;
if ( vfio_iommufd_device_ictx ( vdev ) = = ictx )
return vfio_iommufd_device_id ( vdev ) ;
group = iommu_group_get ( vdev - > dev ) ;
if ( ! group )
return - ENODEV ;
if ( iommufd_ctx_has_group ( ictx , group ) )
devid = - ENOENT ;
else
devid = - ENODEV ;
iommu_group_put ( group ) ;
return devid ;
}
EXPORT_SYMBOL_GPL ( vfio_iommufd_get_dev_id ) ;
2022-11-29 16:31:51 -04:00
/*
* The physical standard ops mean that the iommufd_device is bound to the
* physical device vdev - > dev that was provided to vfio_init_group_dev ( ) . Drivers
* using this ops set should call vfio_register_group_dev ( )
*/
int vfio_iommufd_physical_bind ( struct vfio_device * vdev ,
struct iommufd_ctx * ictx , u32 * out_device_id )
{
struct iommufd_device * idev ;
idev = iommufd_device_bind ( ictx , vdev - > dev , out_device_id ) ;
if ( IS_ERR ( idev ) )
return PTR_ERR ( idev ) ;
vdev - > iommufd_device = idev ;
return 0 ;
}
EXPORT_SYMBOL_GPL ( vfio_iommufd_physical_bind ) ;
void vfio_iommufd_physical_unbind ( struct vfio_device * vdev )
{
lockdep_assert_held ( & vdev - > dev_set - > lock ) ;
if ( vdev - > iommufd_attached ) {
iommufd_device_detach ( vdev - > iommufd_device ) ;
vdev - > iommufd_attached = false ;
}
iommufd_device_unbind ( vdev - > iommufd_device ) ;
vdev - > iommufd_device = NULL ;
}
EXPORT_SYMBOL_GPL ( vfio_iommufd_physical_unbind ) ;
/*
 * Attach the bound physical device to the page table named by *pt_id.
 * A second call while attached replaces the current page table.
 */
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int err;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;

	err = vdev->iommufd_attached ?
		      iommufd_device_replace(vdev->iommufd_device, pt_id) :
		      iommufd_device_attach(vdev->iommufd_device, pt_id);
	if (err)
		return err;

	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
2022-11-29 16:31:52 -04:00
2023-07-18 06:55:38 -07:00
void vfio_iommufd_physical_detach_ioas ( struct vfio_device * vdev )
{
lockdep_assert_held ( & vdev - > dev_set - > lock ) ;
if ( WARN_ON ( ! vdev - > iommufd_device ) | | ! vdev - > iommufd_attached )
return ;
iommufd_device_detach ( vdev - > iommufd_device ) ;
vdev - > iommufd_attached = false ;
}
EXPORT_SYMBOL_GPL ( vfio_iommufd_physical_detach_ioas ) ;
2022-11-29 16:31:52 -04:00
/*
* The emulated standard ops mean that vfio_device is going to use the
* " mdev path " and will call vfio_pin_pages ( ) / vfio_dma_rw ( ) . Drivers using this
2023-03-27 02:33:50 -07:00
* ops set should call vfio_register_emulated_iommu_dev ( ) . Drivers that do
* not call vfio_pin_pages ( ) / vfio_dma_rw ( ) have no need to provide dma_unmap .
2022-11-29 16:31:52 -04:00
*/
static void vfio_emulated_unmap ( void * data , unsigned long iova ,
unsigned long length )
{
struct vfio_device * vdev = data ;
2023-03-27 02:33:50 -07:00
if ( vdev - > ops - > dma_unmap )
vdev - > ops - > dma_unmap ( vdev , iova , length ) ;
2022-11-29 16:31:52 -04:00
}
/* iommufd_access callbacks for the emulated (mdev) path */
static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};
/*
 * Create an iommufd_access for an emulated device and record it on the
 * vfio_device, returning its id through @out_device_id.
 */
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_access *access;

	lockdep_assert_held(&vdev->dev_set->lock);

	access = iommufd_access_create(ictx, &vfio_user_ops, vdev,
				       out_device_id);
	if (IS_ERR(access))
		return PTR_ERR(access);

	vdev->iommufd_access = access;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
void vfio_iommufd_emulated_unbind ( struct vfio_device * vdev )
{
lockdep_assert_held ( & vdev - > dev_set - > lock ) ;
if ( vdev - > iommufd_access ) {
iommufd_access_destroy ( vdev - > iommufd_access ) ;
2023-03-27 02:33:47 -07:00
vdev - > iommufd_attached = false ;
2022-11-29 16:31:52 -04:00
vdev - > iommufd_access = NULL ;
}
}
EXPORT_SYMBOL_GPL ( vfio_iommufd_emulated_unbind ) ;
/*
 * Attach the emulated device's iommufd_access to the IOAS *pt_id.
 * A second call while attached replaces the current IOAS.
 */
int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int err;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached)
		err = iommufd_access_replace(vdev->iommufd_access, *pt_id);
	else
		err = iommufd_access_attach(vdev->iommufd_access, *pt_id);
	if (err)
		return err;

	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
2023-07-18 06:55:40 -07:00
void vfio_iommufd_emulated_detach_ioas ( struct vfio_device * vdev )
{
lockdep_assert_held ( & vdev - > dev_set - > lock ) ;
if ( WARN_ON ( ! vdev - > iommufd_access ) | |
! vdev - > iommufd_attached )
return ;
iommufd_access_detach ( vdev - > iommufd_access ) ;
vdev - > iommufd_attached = false ;
}
EXPORT_SYMBOL_GPL ( vfio_iommufd_emulated_detach_ioas ) ;