/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
# ifndef _ASM_IOMMU_H
# define _ASM_IOMMU_H
2005-12-16 22:43:46 +01:00
# ifdef __KERNEL__
2005-10-06 12:06:20 +10:00
2006-10-30 16:15:59 +11:00
# include <linux/compiler.h>
2005-10-06 12:06:20 +10:00
# include <linux/spinlock.h>
# include <linux/device.h>
# include <linux/dma-mapping.h>
2007-10-18 23:40:25 -07:00
# include <linux/bitops.h>
2007-05-03 22:28:32 +10:00
# include <asm/machdep.h>
2006-10-30 16:15:59 +11:00
# include <asm/types.h>
2015-03-31 16:00:48 +11:00
# include <asm/pci-bridge.h>
2006-10-30 16:15:59 +11:00
2013-12-09 18:17:01 +11:00
# define IOMMU_PAGE_SHIFT_4K 12
# define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
# define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
# define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
2006-10-30 16:15:59 +11:00
2013-12-09 18:17:03 +11:00
/*
 * Per-table IOMMU page geometry, driven by tbl->it_page_shift.
 *
 * IOMMU_PAGE_MASK must be computed with an unsigned long constant
 * (ASM_CONST(1), matching IOMMU_PAGE_SIZE above): a plain int `1` shifted
 * by it_page_shift is undefined behavior for shifts >= 31 and only
 * produces a full-width mask via sign extension for smaller shifts.
 */
#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((ASM_CONST(1) << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
2006-11-11 17:25:18 +11:00
/* Boot time flags */
extern int iommu_is_off ;
extern int iommu_force_on ;
2006-10-30 16:15:59 +11:00
2015-06-05 16:35:06 +10:00
/*
 * Backend callbacks for one TCE table; the platform (pseries, powernv, ...)
 * supplies an instance and it is reached through iommu_table::it_ops.
 */
struct iommu_table_ops {
	/*
	 * Program @npages TCEs starting at @index.
	 * When called with direction == DMA_NONE, it is equal to clear().
	 * uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			struct dma_attrs *attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges existing TCE with new TCE plus direction bits;
	 * returns old TCE and DMA direction mask.
	 * @tce is a physical address.
	 */
	int (*exchange)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);
#endif
	/* Invalidate @npages entries starting at @index. */
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	/* Push pending TCE updates to the hardware, if the backend batches. */
	void (*flush)(struct iommu_table *tbl);
	/* Release backend resources for @tbl. */
	void (*free)(struct iommu_table *tbl);
};
/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops ;
extern struct iommu_table_ops iommu_table_pseries_ops ;
2005-10-06 12:06:20 +10:00
/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * allows up to 2**12 pages (4096 * 4096) = 16 MB
 */
2006-10-30 16:15:59 +11:00
# define IOMAP_MAX_ORDER 13
2005-10-06 12:06:20 +10:00
2012-06-07 18:14:48 +00:00
/* The allocation space is split across up to IOMMU_NR_POOLS pools,
 * each with its own lock (see struct iommu_table::pools). */
#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

/* One independently-locked sub-range of a table's entry space. */
struct iommu_pool {
	unsigned long start;	/* first entry of this pool's range */
	unsigned long end;	/* end of this pool's range — NOTE(review): confirm inclusive vs exclusive in iommu.c */
	unsigned long hint;	/* presumably the next-free search hint — confirm against the allocator */
	spinlock_t lock;
} ____cacheline_aligned_in_smp;	/* per-pool cacheline to avoid false sharing between locks */
2005-10-06 12:06:20 +10:00
/*
 * One TCE (DMA translation) table together with the software state used
 * to allocate IOMMU-page-sized chunks of DMA space from it.
 */
struct iommu_table {
	unsigned long  it_busno;	/* Bus number this table belongs to */
	unsigned long  it_size;		/* Size of iommu table in entries */
	unsigned long  it_indirect_levels;	/* levels of TCE indirection — NOTE(review): semantics defined by backend */
	unsigned long  it_level_size;	/* entries per indirect level — TODO confirm */
	unsigned long  it_allocated_size;	/* bytes allocated for the table — TODO confirm */
	unsigned long  it_offset;	/* Offset into global table */
	unsigned long  it_base;		/* mapped address of tce table */
	unsigned long  it_index;	/* which iommu table this is */
	unsigned long  it_type;		/* type: PCI or Virtual Bus */
	unsigned long  it_blocksize;	/* Entries in each block (cacheline) */
	unsigned long  poolsize;	/* entries covered by each pool */
	unsigned long  nr_pools;	/* pools in use, at most IOMMU_NR_POOLS */
	struct iommu_pool large_pool;	/* separate pool — presumably for large allocations; confirm in iommu.c */
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;		/* A simple allocation bitmap for now */
	unsigned long  it_page_shift;	/* table iommu page size */
	struct list_head it_group_list;	/* List of iommu_table_group_link */
	unsigned long *it_userspace;	/* userspace view of the table */
	struct iommu_table_ops *it_ops;	/* backend callbacks, see struct iommu_table_ops */
};
2015-06-05 16:35:25 +10:00
# define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
( ( tbl ) - > it_userspace ? \
& ( ( tbl ) - > it_userspace [ ( entry ) - ( tbl ) - > it_offset ] ) : \
NULL )
2013-12-09 18:17:03 +11:00
/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order ( unsigned long size , struct iommu_table * tbl )
{
return __ilog2 ( ( size - 1 ) > > tbl - > it_page_shift ) + 1 ;
}
2005-10-06 12:06:20 +10:00
struct scatterlist ;
2015-06-24 15:25:22 +10:00
# ifdef CONFIG_PPC64
/* Attach @base to @dev; read back with get_iommu_table_base(). */
static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}
/* Returns whatever set_iommu_table_base() last stored for @dev. */
static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}
2015-06-24 15:25:22 +10:00
extern int dma_iommu_dma_supported ( struct device * dev , u64 mask ) ;
2005-10-06 12:06:20 +10:00
/* Frees table for an individual device node */
2007-12-06 13:39:19 +11:00
extern void iommu_free_table ( struct iommu_table * tbl , const char * node_name ) ;
2005-10-06 12:06:20 +10:00
/* Initializes an iommu_table based in values set in the passed-in
* structure
*/
2006-06-10 20:58:08 +10:00
extern struct iommu_table * iommu_init_table ( struct iommu_table * tbl ,
int nid ) ;
2015-06-05 16:35:26 +10:00
# define IOMMU_TABLE_GROUP_MAX_TABLES 2
2015-06-05 16:35:08 +10:00
2015-06-05 16:35:10 +10:00
struct iommu_table_group;

/* Platform hooks for managing the DMA windows of an iommu_table_group. */
struct iommu_table_group_ops {
	/* Size needed for a table with the given geometry — presumably in
	 * bytes without allocating anything; confirm against the backend. */
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	/* Build a table for window @num and return it via @ptbl. */
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	/* Install @tblnew as window @num of the group. */
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	/* Remove window @num from the group. */
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Switch ownership from platform code to external user (e.g. VFIO) */
	void (*take_ownership)(struct iommu_table_group *table_group);
	/* Switch ownership from external user (e.g. VFIO) back to core */
	void (*release_ownership)(struct iommu_table_group *table_group);
};
2015-06-05 16:35:09 +10:00
/*
 * Node on iommu_table::it_group_list connecting a table to one of its
 * table groups; carries an rcu_head, so it is presumably freed via RCU.
 */
struct iommu_table_group_link {
	struct list_head next;
	struct rcu_head rcu;
	struct iommu_table_group *table_group;
};
2015-06-05 16:35:08 +10:00
/* A set of DMA windows sharing one iommu_group, plus the window limits. */
struct iommu_table_group {
	/* IOMMU properties */
	__u32 tce32_start;	/* start of the default 32-bit window — TODO confirm */
	__u32 tce32_size;	/* size of the default 32-bit window — TODO confirm */
	__u64 pgsizes;		/* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;	/* max TCE indirection levels */

	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;	/* platform hooks, may be unset — NOTE(review): confirm */
};
2013-11-21 17:43:14 +11:00
# ifdef CONFIG_IOMMU_API
2015-06-05 16:35:08 +10:00
extern void iommu_register_group ( struct iommu_table_group * table_group ,
2013-05-21 13:33:09 +10:00
int pci_domain_number , unsigned long pe_num ) ;
2013-11-21 17:43:14 +11:00
extern int iommu_add_device ( struct device * dev ) ;
extern void iommu_del_device ( struct device * dev ) ;
2015-02-21 11:00:50 -08:00
extern int __init tce_iommu_bus_notifier_init ( void ) ;
2015-06-05 16:35:15 +10:00
extern long iommu_tce_xchg ( struct iommu_table * tbl , unsigned long entry ,
unsigned long * hpa , enum dma_data_direction * direction ) ;
2013-11-21 17:43:14 +11:00
# else
2015-06-05 16:35:08 +10:00
/* Stub when CONFIG_IOMMU_API is disabled: nothing to register. */
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}
static inline int iommu_add_device ( struct device * dev )
{
return 0 ;
}
static inline void iommu_del_device ( struct device * dev )
{
}
2015-02-21 11:00:50 -08:00
/* Stub when CONFIG_IOMMU_API is disabled: always reports success. */
static inline int __init tce_iommu_bus_notifier_init(void)
{
	return 0;
}
2013-11-21 17:43:14 +11:00
# endif /* !CONFIG_IOMMU_API */
2015-06-24 15:25:22 +10:00
# else
/* !CONFIG_PPC64 stub: there is no per-device iommu table. */
static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}
/* !CONFIG_PPC64 stub: IOMMU-backed DMA is never supported. */
static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
# endif /* CONFIG_PPC64 */
2014-11-05 15:28:30 +01:00
extern int ppc_iommu_map_sg ( struct device * dev , struct iommu_table * tbl ,
struct scatterlist * sglist , int nelems ,
unsigned long mask ,
enum dma_data_direction direction ,
struct dma_attrs * attrs ) ;
extern void ppc_iommu_unmap_sg ( struct iommu_table * tbl ,
struct scatterlist * sglist ,
int nelems ,
enum dma_data_direction direction ,
struct dma_attrs * attrs ) ;
2005-10-06 12:06:20 +10:00
2008-02-04 22:28:08 -08:00
extern void * iommu_alloc_coherent ( struct device * dev , struct iommu_table * tbl ,
size_t size , dma_addr_t * dma_handle ,
unsigned long mask , gfp_t flag , int node ) ;
2005-10-06 12:06:20 +10:00
extern void iommu_free_coherent ( struct iommu_table * tbl , size_t size ,
2006-11-11 17:25:02 +11:00
void * vaddr , dma_addr_t dma_handle ) ;
2008-10-27 20:38:08 +00:00
extern dma_addr_t iommu_map_page ( struct device * dev , struct iommu_table * tbl ,
struct page * page , unsigned long offset ,
size_t size , unsigned long mask ,
enum dma_data_direction direction ,
struct dma_attrs * attrs ) ;
extern void iommu_unmap_page ( struct iommu_table * tbl , dma_addr_t dma_handle ,
size_t size , enum dma_data_direction direction ,
struct dma_attrs * attrs ) ;
2005-10-06 12:06:20 +10:00
extern void iommu_init_early_pSeries ( void ) ;
2015-03-31 16:00:48 +11:00
extern void iommu_init_early_dart ( struct pci_controller_ops * controller_ops ) ;
2007-02-04 16:36:55 -06:00
extern void iommu_init_early_pasemi ( void ) ;
2005-10-06 12:06:20 +10:00
2005-12-14 13:10:10 +11:00
extern void alloc_dart_table ( void ) ;
2007-05-03 22:28:32 +10:00
# if defined(CONFIG_PPC64) && defined(CONFIG_PM)
/* Invoke the machine-specific iommu_save hook, if one is registered
 * (compiled only under CONFIG_PPC64 && CONFIG_PM). */
static inline void iommu_save(void)
{
	if (ppc_md.iommu_save)
		ppc_md.iommu_save();
}
/* Invoke the machine-specific iommu_restore hook, if one is registered
 * (compiled only under CONFIG_PPC64 && CONFIG_PM). */
static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
# endif
2005-10-06 12:06:20 +10:00
2013-05-21 13:33:09 +10:00
/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_clear_param_check ( struct iommu_table * tbl ,
unsigned long ioba , unsigned long tce_value ,
unsigned long npages ) ;
extern int iommu_tce_put_param_check ( struct iommu_table * tbl ,
unsigned long ioba , unsigned long tce ) ;
extern void iommu_flush_tce ( struct iommu_table * tbl ) ;
extern int iommu_take_ownership ( struct iommu_table * tbl ) ;
extern void iommu_release_ownership ( struct iommu_table * tbl ) ;
extern enum dma_data_direction iommu_tce_direction ( unsigned long tce ) ;
2015-06-05 16:35:05 +10:00
extern unsigned long iommu_direction_to_tce_perm ( enum dma_data_direction dir ) ;
2013-05-21 13:33:09 +10:00
2005-12-16 22:43:46 +01:00
# endif /* __KERNEL__ */
2005-10-06 12:06:20 +10:00
# endif /* _ASM_IOMMU_H */