// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

struct kvm_vfio_group {
	struct list_head node;
	struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct iommu_group *iommu_group;
#endif
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};
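
/*
 * The vfio_file_*() helpers below live in the vfio module, which may not
 * be loaded (or even built).  Resolving them with symbol_get() avoids a
 * hard module dependency: each call pins the vfio module for its duration
 * and degrades gracefully when vfio is absent.
 */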
static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	void (*fn)(struct file *file, struct kvm *kvm);

	fn = symbol_get(vfio_file_set_kvm);
	if (!fn)
		return;

	fn(file, kvm);

	symbol_put(vfio_file_set_kvm);
}

static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_enforced_coherent);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_enforced_coherent);

	return ret;
}

static bool kvm_vfio_file_is_group(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_is_group);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_is_group);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
	struct iommu_group *(*fn)(struct file *file);
	struct iommu_group *ret;

	fn = symbol_get(vfio_file_iommu_group);
	if (!fn)
		return NULL;

	ret = fn(file);

	symbol_put(vfio_file_iommu_group);

	return ret;
}
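
/*
 * Undo kvm_vfio_group_set_spapr_tce(): detach the group's TCE tables from
 * this KVM and drop the iommu_group reference obtained via
 * kvm_vfio_file_iommu_group().
 */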
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct kvm_vfio_group *kvg)
{
	if (WARN_ON_ONCE(!kvg->iommu_group))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
	iommu_group_put(kvg->iommu_group);
	kvg->iommu_group = NULL;
}
#endif

/*
 * Groups can use the same or different IOMMU domains.  If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}
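
/*
 * KVM_DEV_VFIO_GROUP_ADD: take a reference on the group file, verify it
 * really is a vfio group, link it into the device's group_list and tell
 * vfio which KVM the group is now associated with.
 */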
static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct file *filp;
	int ret;

	filp = fget(fd);
	if (!filp)
		return -EBADF;

	/* Ensure the FD is a vfio group FD. */
	if (!kvm_vfio_file_is_group(filp)) {
		ret = -EINVAL;
		goto err_fput;
	}

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file == filp) {
			ret = -EEXIST;
			goto err_unlock;
		}
	}

	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
	if (!kvg) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	kvg->file = filp;
	list_add_tail(&kvg->node, &kv->group_list);

	kvm_arch_start_assignment(dev->kvm);

	mutex_unlock(&kv->lock);

	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
	kvm_vfio_update_coherency(dev);

	return 0;

err_unlock:
	mutex_unlock(&kv->lock);
err_fput:
	fput(filp);
	return ret;
}
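
/*
 * KVM_DEV_VFIO_GROUP_DEL: the inverse of group_add.  Drops the list entry,
 * any SPAPR TCE attachment, the KVM association and the file reference.
 */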
static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file != f.file)
			continue;

		list_del(&kvg->node);
		kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		kfree(kvg);
		ret = 0;
		break;
	}

	mutex_unlock(&kv->lock);

	fdput(f);

	kvm_vfio_update_coherency(dev);

	return ret;
}
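
/*
 * KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE (POWER only): attach a TCE table fd,
 * created with KVM_CREATE_SPAPR_TCE, to the group's IOMMU so KVM can
 * handle the guest's TCE (DMA mapping) hypercalls for it.  The
 * iommu_group reference is resolved lazily on first use and released in
 * kvm_spapr_tce_release_vfio_group().
 */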
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
					void __user *arg)
{
	struct kvm_vfio_spapr_tce param;
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
		return -EFAULT;

	f = fdget(param.groupfd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file != f.file)
			continue;

		if (!kvg->iommu_group) {
			kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
			if (WARN_ON_ONCE(!kvg->iommu_group)) {
				ret = -EIO;
				goto err_fdput;
			}
		}

		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
						       kvg->iommu_group);
		break;
	}

err_fdput:
	mutex_unlock(&kv->lock);
	fdput(f);
	return ret;
}
#endif

static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
			      void __user *arg)
{
	int32_t __user *argp = arg;
	int32_t fd;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_add(dev, fd);

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_del(dev, fd);

#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
		return kvm_vfio_group_set_spapr_tce(dev, arg);
#endif
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr,
					  u64_to_user_ptr(attr->addr));
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}
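
/*
 * Called when the KVM-VFIO pseudo device goes away along with its VM:
 * unwind every remaining group just as kvm_vfio_group_del() would, then
 * free the device itself.
 */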
static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};
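
/*
 * Illustrative userspace flow (a sketch only; error handling omitted and
 * vm_fd/group_fd are assumed to already exist):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr = (__u64)(uintptr_t)&group_fd,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */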
static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}