2014-11-14 17:16:49 +00:00
# ifndef __IO_PGTABLE_H
# define __IO_PGTABLE_H
2016-01-26 17:13:13 +00:00
# include <linux/bitops.h>
2014-11-14 17:16:49 +00:00
/*
* Public API for use by IOMMU drivers
*/
/*
 * Page table formats an IOMMU driver may request from
 * alloc_io_pgtable_ops().
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,		/* ARM LPAE, 32-bit, stage 1 */
	ARM_32_LPAE_S2,		/* ARM LPAE, 32-bit, stage 2 */
	ARM_64_LPAE_S1,		/* ARM LPAE, 64-bit, stage 1 */
	ARM_64_LPAE_S2,		/* ARM LPAE, 64-bit, stage 2 */
	ARM_V7S,		/* ARM short-descriptor format */
	IO_PGTABLE_NUM_FMTS,	/* Must be last: number of formats above */
};
/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		/* ARM LPAE stage 1 configuration */
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		/* ARM LPAE stage 2 configuration */
		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		/* ARM short-descriptor (v7s) configuration */
		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;
	};
};
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
		     size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};
/**
* alloc_io_pgtable_ops ( ) - Allocate a page table allocator for use by an IOMMU .
*
* @ fmt : The page table format .
* @ cfg : The page table configuration . This will be modified to represent
* the configuration actually provided by the allocator ( e . g . the
* pgsize_bitmap may be restricted ) .
* @ cookie : An opaque token provided by the IOMMU driver and passed back to
* the callback routines in cfg - > tlb .
*/
struct io_pgtable_ops * alloc_io_pgtable_ops ( enum io_pgtable_fmt fmt ,
struct io_pgtable_cfg * cfg ,
void * cookie ) ;
/**
* free_io_pgtable_ops ( ) - Free an io_pgtable_ops structure . The caller
* * must * ensure that the page table is no longer
* live , but the TLB can be dirty .
*
* @ ops : The ops returned from alloc_io_pgtable_ops .
*/
void free_io_pgtable_ops ( struct io_pgtable_ops * ops ) ;
/*
* Internal structures for page table allocator implementations .
*/
/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:              The page table format.
 * @cookie:           An opaque token provided by the IOMMU driver and passed
 *                    back to any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs; set by
 *                    the io_pgtable_tlb_*() helpers below.
 * @cfg:              A copy of the page table configuration.
 * @ops:              The page table operations in use for this set of page
 *                    tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};
2015-12-04 17:53:01 +00:00
# define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
2016-01-26 17:13:14 +00:00
static inline void io_pgtable_tlb_flush_all ( struct io_pgtable * iop )
{
iop - > cfg . tlb - > tlb_flush_all ( iop - > cookie ) ;
2016-01-26 17:13:15 +00:00
iop - > tlb_sync_pending = true ;
2016-01-26 17:13:14 +00:00
}
static inline void io_pgtable_tlb_add_flush ( struct io_pgtable * iop ,
unsigned long iova , size_t size , size_t granule , bool leaf )
{
iop - > cfg . tlb - > tlb_add_flush ( iova , size , granule , leaf , iop - > cookie ) ;
2016-01-26 17:13:15 +00:00
iop - > tlb_sync_pending = true ;
2016-01-26 17:13:14 +00:00
}
static inline void io_pgtable_tlb_sync ( struct io_pgtable * iop )
{
2016-01-26 17:13:15 +00:00
if ( iop - > tlb_sync_pending ) {
iop - > cfg . tlb - > tlb_sync ( iop - > cookie ) ;
iop - > tlb_sync_pending = false ;
}
2016-01-26 17:13:14 +00:00
}
2014-11-14 17:16:49 +00:00
/**
* struct io_pgtable_init_fns - Alloc / free a set of page tables for a
* particular format .
*
* @ alloc : Allocate a set of page tables described by cfg .
* @ free : Free the page tables associated with iop .
*/
struct io_pgtable_init_fns {
struct io_pgtable * ( * alloc ) ( struct io_pgtable_cfg * cfg , void * cookie ) ;
void ( * free ) ( struct io_pgtable * iop ) ;
} ;
2015-08-13 12:01:10 +02:00
/* Per-format init functions, provided by the individual format allocators */
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
2015-08-13 12:01:10 +02:00
2014-11-14 17:16:49 +00:00
# endif /* __IO_PGTABLE_H */