/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)
static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
    if (!strcmp(str, "novmerge"))
        novmerge = 1;
    else if (!strcmp(str, "vmerge"))
        novmerge = 0;
    return 1;
}

__setup("iommu=", setup_iommu);
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
    unsigned int i;

    for_each_possible_cpu(i)
        per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

    return 0;
}
subsys_initcall(setup_iommu_pool_hash);
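
/*
 * Optional IOMMU fault injection: when CONFIG_FAIL_IOMMU is enabled,
 * mappings on devices whose per-device "fail_iommu" sysfs attribute is
 * set can be made to fail according to the configured fault attributes,
 * which helps exercise driver error paths.
 */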
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
    return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
    return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
    struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
                                                   NULL, &fail_iommu);

    return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
    return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
    int i;

    if (count > 0 && sscanf(buf, "%d", &i) > 0)
        dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

    return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
    struct device *dev = data;

    if (action == BUS_NOTIFY_ADD_DEVICE) {
        if (device_create_file(dev, &dev_attr_fail_iommu))
            pr_warn("Unable to create IOMMU fault injection sysfs "
                    "entries\n");
    } else if (action == BUS_NOTIFY_DEL_DEVICE) {
        device_remove_file(dev, &dev_attr_fail_iommu);
    }

    return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
    .notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
    bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
    bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

    return 0;
}
/*
 * Must execute after PCI and VIO subsystem have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
    return false;
}
#endif
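
/*
 * Allocate a contiguous range of @npages entries in @tbl's bitmap.
 *
 * Small allocations (15 pages or fewer) come from one of the per-CPU
 * pools, selected via the precomputed hash; larger ones come from the
 * large pool at the top of the table. The search honours @mask (the
 * device's DMA addressing limit), the device's segment boundary and
 * @align_order, retrying the other pools before giving up. Returns the
 * index of the first allocated entry, or IOMMU_MAPPING_ERROR on failure.
 */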
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
    unsigned long n, end, start;
    unsigned long limit;
    int largealloc = npages > 15;
    int pass = 0;
    unsigned long align_mask;
    unsigned long boundary_size;
    unsigned long flags;
    unsigned int pool_nr;
    struct iommu_pool *pool;

    align_mask = (1ull << align_order) - 1;

    /* This allocator was derived from x86_64's bit string search */

    /* Sanity check */
    if (unlikely(npages == 0)) {
        if (printk_ratelimit())
            WARN_ON(1);
        return IOMMU_MAPPING_ERROR;
    }

    if (should_fail_iommu(dev))
        return IOMMU_MAPPING_ERROR;

    /*
     * We don't need to disable preemption here because any CPU can
     * safely use any IOMMU pool.
     */
    pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

    if (largealloc)
        pool = &(tbl->large_pool);
    else
        pool = &(tbl->pools[pool_nr]);

    spin_lock_irqsave(&(pool->lock), flags);

again:
    if ((pass == 0) && handle && *handle &&
        (*handle >= pool->start) && (*handle < pool->end))
        start = *handle;
    else
        start = pool->hint;

    limit = pool->end;

    /* The case below can happen if we have a small segment appended
     * to a large, or when the previous alloc was at the very end of
     * the available space. If so, go back to the initial start.
     */
    if (start >= limit)
        start = pool->start;

    if (limit + tbl->it_offset > mask) {
        limit = mask - tbl->it_offset + 1;
        /* If we're constrained on address range, first try
         * at the masked hint to avoid O(n) search complexity,
         * but on second pass, start at 0 in pool 0.
         */
        if ((start & mask) >= limit || pass > 0) {
            spin_unlock(&(pool->lock));
            pool = &(tbl->pools[0]);
            spin_lock(&(pool->lock));
            start = pool->start;
        } else {
            start &= mask;
        }
    }

    if (dev)
        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                              1 << tbl->it_page_shift);
    else
        boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
    /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

    n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
                         boundary_size >> tbl->it_page_shift, align_mask);
    if (n == -1) {
        if (likely(pass == 0)) {
            /* First try the pool from the start */
            pool->hint = pool->start;
            pass++;
            goto again;

        } else if (pass <= tbl->nr_pools) {
            /* Now try scanning all the other pools */
            spin_unlock(&(pool->lock));
            pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
            pool = &tbl->pools[pool_nr];
            spin_lock(&(pool->lock));
            pool->hint = pool->start;
            pass++;
            goto again;

        } else {
            /* Give up */
            spin_unlock_irqrestore(&(pool->lock), flags);
            return IOMMU_MAPPING_ERROR;
        }
    }

    end = n + npages;

    /* Bump the hint to a new block for small allocs. */
    if (largealloc) {
        /* Don't bump to new block to avoid fragmentation */
        pool->hint = end;
    } else {
        /* Overflow will be taken care of at the next allocation */
        pool->hint = (end + tbl->it_blocksize - 1) &
                        ~(tbl->it_blocksize - 1);
    }

    /* Update handle for SG allocations */
    if (handle)
        *handle = end;

    spin_unlock_irqrestore(&(pool->lock), flags);

    return n;
}
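
/*
 * Allocate table entries for @npages starting at kernel address @page
 * and program the hardware TCEs through tbl->it_ops->set(). Returns the
 * resulting DMA address, or IOMMU_MAPPING_ERROR if either the bitmap
 * allocation or the TCE build fails.
 */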
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              unsigned long attrs)
{
    unsigned long entry;
    dma_addr_t ret = IOMMU_MAPPING_ERROR;
    int build_fail;

    entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

    if (unlikely(entry == IOMMU_MAPPING_ERROR))
        return IOMMU_MAPPING_ERROR;

    entry += tbl->it_offset;            /* Offset into real TCE table */
    ret = entry << tbl->it_page_shift;  /* Set the return dma address */

    /* Put the TCEs in the HW table */
    build_fail = tbl->it_ops->set(tbl, entry, npages,
                                  (unsigned long)page &
                                  IOMMU_PAGE_MASK(tbl), direction, attrs);

    /* tbl->it_ops->set() only returns non-zero for transient errors.
     * Clean up the table bitmap in this case and return
     * IOMMU_MAPPING_ERROR. For all other errors the functionality is
     * not altered.
     */
    if (unlikely(build_fail)) {
        __iommu_free(tbl, ret, npages);
        return IOMMU_MAPPING_ERROR;
    }

    /* Flush/invalidate TLB caches if necessary */
    if (tbl->it_ops->flush)
        tbl->it_ops->flush(tbl);

    /* Make sure updates are seen by hardware */
    mb();

    return ret;
}
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
                             unsigned int npages)
{
    unsigned long entry, free_entry;

    entry = dma_addr >> tbl->it_page_shift;
    free_entry = entry - tbl->it_offset;

    if (((free_entry + npages) > tbl->it_size) ||
        (entry < tbl->it_offset)) {
        if (printk_ratelimit()) {
            printk(KERN_INFO "iommu_free: invalid entry\n");
            printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
            printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
            printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
            printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
            printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
            printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
            printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
            WARN_ON(1);
        }

        return false;
    }

    return true;
}
static struct iommu_pool *get_pool(struct iommu_table *tbl,
                                   unsigned long entry)
{
    struct iommu_pool *p;
    unsigned long largepool_start = tbl->large_pool.start;

    /* The large pool is the last pool at the top of the table */
    if (entry >= largepool_start) {
        p = &tbl->large_pool;
    } else {
        unsigned int pool_nr = entry / tbl->poolsize;

        BUG_ON(pool_nr > tbl->nr_pools);
        p = &tbl->pools[pool_nr];
    }

    return p;
}
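
/*
 * Clear the TCEs for @npages at @dma_addr and give the range back to
 * the owning pool's bitmap. No hardware flush is done here; callers
 * that need one use iommu_free() or flush explicitly.
 */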
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
    unsigned long entry, free_entry;
    unsigned long flags;
    struct iommu_pool *pool;

    entry = dma_addr >> tbl->it_page_shift;
    free_entry = entry - tbl->it_offset;

    pool = get_pool(tbl, free_entry);

    if (!iommu_free_check(tbl, dma_addr, npages))
        return;

    tbl->it_ops->clear(tbl, entry, npages);

    spin_lock_irqsave(&(pool->lock), flags);
    bitmap_clear(tbl->it_map, free_entry, npages);
    spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
    __iommu_free(tbl, dma_addr, npages);

    /* Make sure TLB cache is flushed if the HW needs it. We do
     * not do an mb() here on purpose, it is not needed on any of
     * the current platforms.
     */
    if (tbl->it_ops->flush)
        tbl->it_ops->flush(tbl);
}
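
/*
 * Map a scatterlist. Each segment gets its own range of TCEs, and
 * adjacent segments are merged into one DMA segment ("virtual merging")
 * when their DMA addresses turn out to be contiguous, unless novmerge
 * is set or the merged length would exceed the device's maximum segment
 * size. Returns the number of DMA segments produced, or 0 on failure
 * after undoing any partial mappings.
 */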
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                     struct scatterlist *sglist, int nelems,
                     unsigned long mask, enum dma_data_direction direction,
                     unsigned long attrs)
{
    dma_addr_t dma_next = 0, dma_addr;
    struct scatterlist *s, *outs, *segstart;
    int outcount, incount, i, build_fail = 0;
    unsigned int align;
    unsigned long handle;
    unsigned int max_seg_size;

    BUG_ON(direction == DMA_NONE);

    if ((nelems == 0) || !tbl)
        return 0;

    outs = s = segstart = &sglist[0];
    outcount = 1;
    incount = nelems;
    handle = 0;

    /* Init first segment length for backout at failure */
    outs->dma_length = 0;

    DBG("sg mapping %d elements:\n", nelems);

    max_seg_size = dma_get_max_seg_size(dev);
    for_each_sg(sglist, s, nelems, i) {
        unsigned long vaddr, npages, entry, slen;

        slen = s->length;
        /* Sanity check */
        if (slen == 0) {
            dma_next = 0;
            continue;
        }
        /* Allocate iommu entries for that segment */
        vaddr = (unsigned long) sg_virt(s);
        npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
        align = 0;
        if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
            (vaddr & ~PAGE_MASK) == 0)
            align = PAGE_SHIFT - tbl->it_page_shift;
        entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                  mask >> tbl->it_page_shift, align);

        DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

        /* Handle failure */
        if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
            if (!(attrs & DMA_ATTR_NO_WARN) &&
                printk_ratelimit())
                dev_info(dev, "iommu_alloc failed, tbl %p "
                         "vaddr %lx npages %lu\n", tbl, vaddr,
                         npages);
            goto failure;
        }

        /* Convert entry to a dma_addr_t */
        entry += tbl->it_offset;
        dma_addr = entry << tbl->it_page_shift;
        dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

        DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
            npages, entry, dma_addr);

        /* Insert into HW table */
        build_fail = tbl->it_ops->set(tbl, entry, npages,
                                      vaddr & IOMMU_PAGE_MASK(tbl),
                                      direction, attrs);
        if (unlikely(build_fail))
            goto failure;

        /* If we are in an open segment, try merging */
        if (segstart != s) {
            DBG("  - trying merge...\n");
            /* We cannot merge if:
             * - allocated dma_addr isn't contiguous to previous allocation
             */
            if (novmerge || (dma_addr != dma_next) ||
                (outs->dma_length + s->length > max_seg_size)) {
                /* Can't merge: create a new segment */
                segstart = s;
                outcount++;
                outs = sg_next(outs);
                DBG("    can't merge, new segment.\n");
            } else {
                outs->dma_length += s->length;
                DBG("    merged, new len: %ux\n", outs->dma_length);
            }
        }

        if (segstart == s) {
            /* This is a new segment, fill entries */
            DBG("  - filling new segment.\n");
            outs->dma_address = dma_addr;
            outs->dma_length = slen;
        }

        /* Calculate next page pointer for contiguous check */
        dma_next = dma_addr + slen;

        DBG("  - dma next is: %lx\n", dma_next);
    }

    /* Flush/invalidate TLB caches if necessary */
    if (tbl->it_ops->flush)
        tbl->it_ops->flush(tbl);

    DBG("mapped %d elements:\n", outcount);

    /* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
     * next entry of the sglist if we didn't fill the list completely
     */
    if (outcount < incount) {
        outs = sg_next(outs);
        outs->dma_address = IOMMU_MAPPING_ERROR;
        outs->dma_length = 0;
    }

    /* Make sure updates are seen by hardware */
    mb();

    return outcount;

 failure:
    for_each_sg(sglist, s, nelems, i) {
        if (s->dma_length != 0) {
            unsigned long vaddr, npages;

            vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
            npages = iommu_num_pages(s->dma_address, s->dma_length,
                                     IOMMU_PAGE_SIZE(tbl));
            __iommu_free(tbl, vaddr, npages);
            s->dma_address = IOMMU_MAPPING_ERROR;
            s->dma_length = 0;
        }
        if (s == outs)
            break;
    }
    return 0;
}
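
/*
 * Tear down a mapping created by ppc_iommu_map_sg(): walk the list up
 * to the first zero-length entry and free each segment's TCE range.
 */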
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                        int nelems, enum dma_data_direction direction,
                        unsigned long attrs)
{
    struct scatterlist *sg;

    BUG_ON(direction == DMA_NONE);

    if (!tbl)
        return;

    sg = sglist;
    while (nelems--) {
        unsigned int npages;
        dma_addr_t dma_handle = sg->dma_address;

        if (sg->dma_length == 0)
            break;
        npages = iommu_num_pages(dma_handle, sg->dma_length,
                                 IOMMU_PAGE_SIZE(tbl));
        __iommu_free(tbl, dma_handle, npages);
        sg = sg_next(sg);
    }

    /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
     * do not do an mb() here, the affected platforms do not need it
     * when freeing.
     */
    if (tbl->it_ops->flush)
        tbl->it_ops->flush(tbl);
}
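
/*
 * Prepare the table contents at init time. On a normal boot (or with
 * firmware assisted dump active) any stale entries left by firmware are
 * cleared. In a kdump kernel the first kernel's mappings are instead
 * preserved by marking them in the bitmap, while keeping at least
 * KDUMP_MIN_TCE_ENTRIES free for the capture kernel.
 */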
static void iommu_table_clear(struct iommu_table *tbl)
{
    /*
     * In case of firmware assisted dump, the system goes through a
     * clean reboot process at the time of system crash. Hence it's
     * safe to clear the TCE entries if firmware assisted dump is active.
     */
    if (!is_kdump_kernel() || is_fadump_active()) {
        /* Clear the table in case firmware left allocations in it */
        tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
        return;
    }

#ifdef CONFIG_CRASH_DUMP
    if (tbl->it_ops->get) {
        unsigned long index, tceval, tcecount = 0;

        /* Reserve the existing mappings left by the first kernel. */
        for (index = 0; index < tbl->it_size; index++) {
            tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
            /*
             * Freed TCE entry contains 0x7fffffffffffffff on JS20
             */
            if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                __set_bit(index, tbl->it_map);
                tcecount++;
            }
        }

        if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
            printk(KERN_WARNING "TCE table is full; freeing ");
            printk(KERN_WARNING "%d entries for the kdump boot\n",
                   KDUMP_MIN_TCE_ENTRIES);
            for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                 index < tbl->it_size; index++)
                __clear_bit(index, tbl->it_map);
        }
    }
#endif
}
/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
    unsigned long sz;
    static int welcomed = 0;
    struct page *page;
    unsigned int i;
    struct iommu_pool *p;

    BUG_ON(!tbl->it_ops);

    /* number of bytes needed for the bitmap */
    sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

    page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
    if (!page)
        panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
    tbl->it_map = page_address(page);
    memset(tbl->it_map, 0, sz);

    /*
     * Reserve page 0 so it will not be used for any mappings.
     * This avoids buggy drivers that consider page 0 to be invalid
     * to crash the machine or even lose data.
     */
    if (tbl->it_offset == 0)
        set_bit(0, tbl->it_map);

    /* We only split the IOMMU table if we have 1GB or more of space */
    if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
        tbl->nr_pools = IOMMU_NR_POOLS;
    else
        tbl->nr_pools = 1;

    /* We reserve the top 1/4 of the table for large allocations */
    tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

    for (i = 0; i < tbl->nr_pools; i++) {
        p = &tbl->pools[i];
        spin_lock_init(&(p->lock));
        p->start = tbl->poolsize * i;
        p->hint = p->start;
        p->end = p->start + tbl->poolsize;
    }

    p = &tbl->large_pool;
    spin_lock_init(&(p->lock));
    p->start = tbl->poolsize * i;
    p->hint = p->start;
    p->end = tbl->it_size;

    iommu_table_clear(tbl);

    if (!welcomed) {
        printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
               novmerge ? "disabled" : "enabled");
        welcomed = 1;
    }

    return tbl;
}
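
/*
 * kref release handler for iommu_tce_table_put(): lets the platform
 * release its private data via it_ops->free(), warns if any TCEs are
 * still mapped, then frees the bitmap and the table itself.
 */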
static void iommu_table_free(struct kref *kref)
{
    unsigned long bitmap_sz;
    unsigned int order;
    struct iommu_table *tbl;

    tbl = container_of(kref, struct iommu_table, it_kref);

    if (tbl->it_ops->free)
        tbl->it_ops->free(tbl);

    if (!tbl->it_map) {
        kfree(tbl);
        return;
    }

    /*
     * In case we have reserved the first bit, we should not emit
     * the warning below.
     */
    if (tbl->it_offset == 0)
        clear_bit(0, tbl->it_map);

    /* verify that table contains no entries */
    if (!bitmap_empty(tbl->it_map, tbl->it_size))
        pr_warn("%s: Unexpected TCEs\n", __func__);

    /* calculate bitmap size in bytes */
    bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

    /* free bitmap */
    order = get_order(bitmap_sz);
    free_pages((unsigned long) tbl->it_map, order);

    /* free table */
    kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
    if (kref_get_unless_zero(&tbl->it_kref))
        return tbl;

    return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
    if (WARN_ON(!tbl))
        return 0;

    return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          unsigned long attrs)
{
    dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
    void *vaddr;
    unsigned long uaddr;
    unsigned int npages, align;

    BUG_ON(direction == DMA_NONE);

    vaddr = page_address(page) + offset;
    uaddr = (unsigned long)vaddr;

    if (tbl) {
        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
        align = 0;
        if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
            ((unsigned long)vaddr & ~PAGE_MASK) == 0)
            align = PAGE_SHIFT - tbl->it_page_shift;

        dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                 mask >> tbl->it_page_shift, align,
                                 attrs);
        if (dma_handle == IOMMU_MAPPING_ERROR) {
            if (!(attrs & DMA_ATTR_NO_WARN) &&
                printk_ratelimit()) {
                dev_info(dev, "iommu_alloc failed, tbl %p "
                         "vaddr %p npages %d\n", tbl, vaddr,
                         npages);
            }
        } else
            dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
    }

    return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      unsigned long attrs)
{
    unsigned int npages;

    BUG_ON(direction == DMA_NONE);

    if (tbl) {
        npages = iommu_num_pages(dma_handle, size,
                                 IOMMU_PAGE_SIZE(tbl));
        iommu_free(tbl, dma_handle, npages);
    }
}
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
    void *ret = NULL;
    dma_addr_t mapping;
    unsigned int order;
    unsigned int nio_pages, io_order;
    struct page *page;

    size = PAGE_ALIGN(size);
    order = get_order(size);

    /*
     * Client asked for way too much space. This is checked later
     * anyway. It is easier to debug here for the drivers than in
     * the tce tables.
     */
    if (order >= IOMAP_MAX_ORDER) {
        dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
                 size);
        return NULL;
    }

    if (!tbl)
        return NULL;

    /* Alloc enough pages (and possibly more) */
    page = alloc_pages_node(node, flag, order);
    if (!page)
        return NULL;
    ret = page_address(page);
    memset(ret, 0, size);

    /* Set up tces to cover the allocated range */
    nio_pages = size >> tbl->it_page_shift;
    io_order = get_iommu_order(size, tbl);
    mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                          mask >> tbl->it_page_shift, io_order, 0);
    if (mapping == IOMMU_MAPPING_ERROR) {
        free_pages((unsigned long)ret, order);
        return NULL;
    }
    *dma_handle = mapping;
    return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
    if (tbl) {
        unsigned int nio_pages;

        size = PAGE_ALIGN(size);
        nio_pages = size >> tbl->it_page_shift;
        iommu_free(tbl, dma_handle, nio_pages);
        size = PAGE_ALIGN(size);
        free_pages((unsigned long)vaddr, get_order(size));
    }
}
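
/*
 * Translate a DMA API direction into TCE permission bits: a device
 * read needs TCE_PCI_READ, a device write needs TCE_PCI_WRITE, and a
 * bidirectional mapping needs both.
 */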
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
    switch (dir) {
    case DMA_BIDIRECTIONAL:
        return TCE_PCI_READ | TCE_PCI_WRITE;
    case DMA_FROM_DEVICE:
        return TCE_PCI_WRITE;
    case DMA_TO_DEVICE:
        return TCE_PCI_READ;
    default:
        return 0;
    }
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
    struct iommu_table_group *table_group = iommu_data;

    table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
                          int pci_domain_number, unsigned long pe_num)
{
    struct iommu_group *grp;
    char *name;

    grp = iommu_group_alloc();
    if (IS_ERR(grp)) {
        pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
                PTR_ERR(grp));
        return;
    }
    table_group->group = grp;
    iommu_group_set_iommudata(grp, table_group, group_release);
    name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
                     pci_domain_number, pe_num);
    if (!name)
        return;
    iommu_group_set_name(grp, name);
    kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
    if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
        return DMA_BIDIRECTIONAL;
    else if (tce & TCE_PCI_READ)
        return DMA_TO_DEVICE;
    else if (tce & TCE_PCI_WRITE)
        return DMA_FROM_DEVICE;
    else
        return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
    /* Flush/invalidate TLB caches if necessary */
    if (tbl->it_ops->flush)
        tbl->it_ops->flush(tbl);

    /* Make sure updates are seen by hardware */
    mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
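
/*
 * Validate a caller-supplied I/O bus address against a window of @size
 * IOMMU pages starting at page @offset: the address must be aligned to
 * the IOMMU page size and must fall inside the window.
 */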
int iommu_tce_check_ioba(unsigned long page_shift,
                         unsigned long offset, unsigned long size,
                         unsigned long ioba, unsigned long npages)
{
    unsigned long mask = (1UL << page_shift) - 1;

    if (ioba & mask)
        return -EINVAL;

    ioba >>= page_shift;
    if (ioba < offset)
        return -EINVAL;

    if ((ioba + 1) > (offset + size))
        return -EINVAL;

    return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
    unsigned long mask = (1UL << page_shift) - 1;

    if (gpa & mask)
        return -EINVAL;

    return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);

long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
                    unsigned long *hpa, enum dma_data_direction *direction)
{
    long ret;

    ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

    if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                 (*direction == DMA_BIDIRECTIONAL)))
        SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

    /* if (unlikely(ret))
        pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
               __func__, hwaddr, entry << tbl->it_page_shift,
               hwaddr, ret); */

    return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
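
/*
 * Switch the table from kernel DMA API use to external (e.g. VFIO)
 * control: with all pool locks held, fail with -EBUSY if any entries
 * are already mapped, otherwise mark the whole bitmap as in use so the
 * kernel allocator stays out of the window.
 */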
int iommu_take_ownership(struct iommu_table *tbl)
{
    unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
    int ret = 0;

    /*
     * VFIO does not control TCE entries allocation and the guest
     * can write new TCEs on top of existing ones so iommu_tce_build()
     * must be able to release old pages. This functionality
     * requires exchange() callback defined so if it is not
     * implemented, we disallow taking ownership over the table.
     */
    if (!tbl->it_ops->exchange)
        return -EINVAL;

    spin_lock_irqsave(&tbl->large_pool.lock, flags);
    for (i = 0; i < tbl->nr_pools; i++)
        spin_lock(&tbl->pools[i].lock);

    if (tbl->it_offset == 0)
        clear_bit(0, tbl->it_map);

    if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
        pr_err("iommu_tce: it_map is not empty");
        ret = -EBUSY;
        /* Restore bit#0 set by iommu_init_table() */
        if (tbl->it_offset == 0)
            set_bit(0, tbl->it_map);
    } else {
        memset(tbl->it_map, 0xff, sz);
    }

    for (i = 0; i < tbl->nr_pools; i++)
        spin_unlock(&tbl->pools[i].lock);
    spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

    return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
    unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

    spin_lock_irqsave(&tbl->large_pool.lock, flags);
    for (i = 0; i < tbl->nr_pools; i++)
        spin_lock(&tbl->pools[i].lock);

    memset(tbl->it_map, 0, sz);

    /* Restore bit#0 set by iommu_init_table() */
    if (tbl->it_offset == 0)
        set_bit(0, tbl->it_map);

    for (i = 0; i < tbl->nr_pools; i++)
        spin_unlock(&tbl->pools[i].lock);
    spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
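
/*
 * Attach @dev to the IOMMU group of the first table group linked to its
 * DMA window. Devices with no table, no group, or not yet registered in
 * sysfs are skipped; devices already in a group return -EBUSY.
 */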
int iommu_add_device(struct device *dev)
{
    struct iommu_table *tbl;
    struct iommu_table_group_link *tgl;

    /*
     * The sysfs entries should be populated before
     * binding the IOMMU group. If the sysfs entries aren't
     * ready, we simply bail.
     */
    if (!device_is_registered(dev))
        return -ENOENT;

    if (dev->iommu_group) {
        pr_debug("%s: Skipping device %s with iommu group %d\n",
                 __func__, dev_name(dev),
                 iommu_group_id(dev->iommu_group));
        return -EBUSY;
    }

    tbl = get_iommu_table_base(dev);
    if (!tbl) {
        pr_debug("%s: Skipping device %s with no tbl\n",
                 __func__, dev_name(dev));
        return 0;
    }

    tgl = list_first_entry_or_null(&tbl->it_group_list,
                                   struct iommu_table_group_link, next);
    if (!tgl) {
        pr_debug("%s: Skipping device %s with no group\n",
                 __func__, dev_name(dev));
        return 0;
    }
    pr_debug("%s: Adding %s to iommu group %d\n",
             __func__, dev_name(dev),
             iommu_group_id(tgl->table_group->group));

    if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
        pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
               __func__, IOMMU_PAGE_SIZE(tbl),
               PAGE_SIZE, dev_name(dev));
        return -EINVAL;
    }

    return iommu_group_add_device(tgl->table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
    /*
     * Some devices might not have an IOMMU table and group,
     * and we needn't detach them from the associated
     * IOMMU groups.
     */
    if (!dev->iommu_group) {
        pr_debug("iommu_tce: skipping device %s with no tbl\n",
                 dev_name(dev));
        return;
    }

    iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);

static int tce_iommu_bus_notifier(struct notifier_block *nb,
                                  unsigned long action, void *data)
{
    struct device *dev = data;

    switch (action) {
    case BUS_NOTIFY_ADD_DEVICE:
        return iommu_add_device(dev);
    case BUS_NOTIFY_DEL_DEVICE:
        if (dev->iommu_group)
            iommu_del_device(dev);
        return 0;
    default:
        return 0;
    }
}

static struct notifier_block tce_iommu_bus_nb = {
    .notifier_call = tce_iommu_bus_notifier,
};

int __init tce_iommu_bus_notifier_init(void)
{
    bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
    return 0;
}
#endif /* CONFIG_IOMMU_API */