// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};
/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};
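/*
 * Latches the calling process' mm onto the container on first use and
 * grabs a reference on it; later callers with a different mm get -EPERM.
 */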
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	mmgrab(container->mm);

	return 0;
}
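/*
 * Releases the container's reference to a preregistered region via
 * mm_iommu_put() and, on success, unlinks and frees the tracking entry.
 */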
static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}
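/*
 * Handles VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: looks up the preregistered
 * region covering vaddr..vaddr+size and drops this container's reference
 * to it.
 */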
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;
	long ret;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		ret = -ENOENT;
	else
		ret = tce_iommu_prereg_free(container, tcemem);

	mm_iommu_put(container->mm, mem);

	return ret;
}
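/*
 * Handles VFIO_IOMMU_SPAPR_REGISTER_MEMORY: preregisters the
 * vaddr..vaddr+size range with mm_iommu_get()/mm_iommu_new() and tracks
 * it on the container's prereg list so it can be cleaned up on release.
 */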
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem) {
				ret = -EBUSY;
				goto put_exit;
			}
		}
	} else {
		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
		if (ret)
			return ret;
	}

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		ret = -ENOMEM;
		goto put_exit;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;

put_exit:
	mm_iommu_put(container->mm, mem);
	return ret;
}
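/*
 * Returns true if the backing host page (or devmem chunk) fully covers one
 * IOMMU page of it_page_shift size, i.e. the TCE cannot expose memory
 * outside of what userspace owns.
 */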
static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
		unsigned int it_page_shift)
{
	struct page *page;
	unsigned long size = 0;

	if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
		return size == (1UL << it_page_shift);

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return page_shift(compound_head(page)) >= it_page_shift;
}
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
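/*
 * Finds the container's IOMMU table whose window covers the given ioba;
 * returns the table index and fills *ptbl, or returns -1 if none matches.
 */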
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}
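/*
 * Returns the index of the first unused slot in container->tables[],
 * or -ENOSPC if all window slots are taken.
 */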
static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult to impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason
	 * for this is that we cannot tell here the amount of RAM used by the
	 * guest as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = account_locked_vm(container->mm, locked, true);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	account_locked_vm(container->mm, container->locked_pages, false);
}
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}
static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	unpin_user_page(page);
}
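/*
 * Translates a userspace address within a preregistered region into a host
 * physical address; also returns the region descriptor so the caller can
 * manage its mapped counter.
 */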
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}
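/*
 * v2 counterpart of tce_iommu_unuse_page(): looks up the userspace address
 * cached for the TCE entry, decrements the preregistered region's mapped
 * counter and clears the cached entry.
 */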
static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}
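/*
 * Clears 'pages' TCE entries starting at 'entry': each entry is exchanged
 * with an empty one and the previously mapped page is unpinned (or its
 * preregistered region's mapped counter dropped in the v2 case).
 */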
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;
	unsigned long lastentry = entry + pages, firstentry = entry;

	for ( ; entry < lastentry; ++entry) {
		if (tbl->it_indirect_levels && tbl->it_userspace) {
			/*
			 * For multilevel tables, we can take a shortcut here
			 * and skip some TCEs as we know that the userspace
			 * addresses cache is a mirror of the real TCE table
			 * and if it is missing some indirect levels, then
			 * the hardware table does not have them allocated
			 * either and therefore does not require updating.
			 */
			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
					entry);
			if (!pua) {
				/* align to level_size which is power of two */
				entry |= tbl->it_level_size - 1;
				continue;
			}
		}

		cond_resched();

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
				&direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	iommu_tce_kill(tbl, firstentry, pages);

	return 0;
}
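/*
 * Pins the single userspace page backing 'tce' and returns its host
 * physical address; write access is requested unless the mapping is
 * device-read-only (DMA_TO_DEVICE).
 */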
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (pin_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
			&page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
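/*
 * Maps 'pages' TCEs starting at 'entry': pins each userspace page, checks
 * that it fully backs the IOMMU page and programs the hardware table; on
 * failure the already programmed entries are cleared again.
 */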
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
				&hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);
	else
		iommu_tce_kill(tbl, entry, pages);

	return ret;
}
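/*
 * v2 (preregistered memory) variant of tce_iommu_build(): translates each
 * userspace address via the prereg cache instead of pinning pages, bumps
 * the region's mapped counter and remembers the userspace address for
 * later unmapping.
 */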
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
				&hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);
	else
		iommu_tce_kill(tbl, entry, pages);

	return ret;
}
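/*
 * Allocates a hardware TCE table for window 'num' via the platform ops,
 * accounting the table's own memory against the owner's locked_vm first.
 */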
static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

	return ret;
}
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	account_locked_vm(container->mm, pages, false);
}
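/*
 * Creates a DMA window in a free table slot and programs it into every
 * attached group; the window's start address chosen by the platform is
 * returned to the caller.
 */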
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}
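/*
 * Lazily creates the default 32-bit DMA window if one is still pending;
 * called from the DMA map/unmap and window-create paths once a group is
 * attached.
 */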
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
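/*
 * Gives control of the group's default tables back to the platform code:
 * clears and releases every table that was previously taken over from
 * the group.
 */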
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		if (container->tables[i])
			table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret = 0;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto free_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto free_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

free_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

unlock_exit:
	mutex_unlock(&container->lock);

	return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}
static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);