/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
2018-02-13 18:20:51 +03:00
# include <linux/acpi_iort.h>
2015-10-01 22:13:58 +03:00
# include <linux/device.h>
# include <linux/dma-iommu.h>
2015-12-18 20:01:46 +03:00
# include <linux/gfp.h>
2015-10-01 22:13:58 +03:00
# include <linux/huge_mm.h>
# include <linux/iommu.h>
# include <linux/iova.h>
2016-09-12 19:13:59 +03:00
# include <linux/irq.h>
2015-10-01 22:13:58 +03:00
# include <linux/mm.h>
2016-09-12 19:14:00 +03:00
# include <linux/pci.h>
2015-12-18 20:01:46 +03:00
# include <linux/scatterlist.h>
# include <linux/vmalloc.h>
2015-10-01 22:13:58 +03:00
2016-09-12 19:13:59 +03:00
/* Bookkeeping for one remapped MSI doorbell page. */
struct iommu_dma_msi_page {
	struct list_head	list;	/* entry in cookie->msi_page_list */
	dma_addr_t		iova;	/* IOVA at which the doorbell is mapped */
	phys_addr_t		phys;	/* physical doorbell address */
};
/* Which flavour of IOVA bookkeeping a cookie carries. */
enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,	/* full iova_domain allocator */
	IOMMU_DMA_MSI_COOKIE,	/* trivial linear allocator, MSI pages only */
};
struct iommu_dma_cookie {
2017-01-19 23:57:46 +03:00
enum iommu_dma_cookie_type type ;
union {
/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
struct iova_domain iovad ;
/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
dma_addr_t msi_iova ;
} ;
struct list_head msi_page_list ;
spinlock_t msi_lock ;
2018-09-20 19:10:22 +03:00
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain * fq_domain ;
2016-09-12 19:13:59 +03:00
} ;
2017-01-19 23:57:46 +03:00
static inline size_t cookie_msi_granule ( struct iommu_dma_cookie * cookie )
{
if ( cookie - > type = = IOMMU_DMA_IOVA_COOKIE )
return cookie - > iovad . granule ;
return PAGE_SIZE ;
}
static struct iommu_dma_cookie * cookie_alloc ( enum iommu_dma_cookie_type type )
{
struct iommu_dma_cookie * cookie ;
cookie = kzalloc ( sizeof ( * cookie ) , GFP_KERNEL ) ;
if ( cookie ) {
spin_lock_init ( & cookie - > msi_lock ) ;
INIT_LIST_HEAD ( & cookie - > msi_page_list ) ;
cookie - > type = type ;
}
return cookie ;
2016-09-12 19:13:59 +03:00
}
2015-10-01 22:13:58 +03:00
/* One-time setup: take a reference on the shared IOVA rcache machinery. */
int iommu_dma_init(void)
{
	return iova_cache_get();
}
/**
* iommu_get_dma_cookie - Acquire DMA - API resources for a domain
* @ domain : IOMMU domain to prepare for DMA - API usage
*
* IOMMU drivers should normally call this from their domain_alloc
* callback when domain - > type = = IOMMU_DOMAIN_DMA .
*/
int iommu_get_dma_cookie ( struct iommu_domain * domain )
2017-01-19 23:57:46 +03:00
{
if ( domain - > iova_cookie )
return - EEXIST ;
domain - > iova_cookie = cookie_alloc ( IOMMU_DMA_IOVA_COOKIE ) ;
if ( ! domain - > iova_cookie )
return - ENOMEM ;
return 0 ;
}
EXPORT_SYMBOL ( iommu_get_dma_cookie ) ;
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	/* MSI-only cookies only make sense on otherwise-unmanaged domains */
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
/**
* iommu_put_dma_cookie - Release a domain ' s DMA mapping resources
2017-01-19 23:57:46 +03:00
* @ domain : IOMMU domain previously prepared by iommu_get_dma_cookie ( ) or
* iommu_get_msi_cookie ( )
2015-10-01 22:13:58 +03:00
*
* IOMMU drivers should normally call this from their domain_free callback .
*/
void iommu_put_dma_cookie ( struct iommu_domain * domain )
{
2016-09-12 19:13:59 +03:00
struct iommu_dma_cookie * cookie = domain - > iova_cookie ;
struct iommu_dma_msi_page * msi , * tmp ;
2015-10-01 22:13:58 +03:00
2016-09-12 19:13:59 +03:00
if ( ! cookie )
2015-10-01 22:13:58 +03:00
return ;
2017-01-19 23:57:46 +03:00
if ( cookie - > type = = IOMMU_DMA_IOVA_COOKIE & & cookie - > iovad . granule )
2016-09-12 19:13:59 +03:00
put_iova_domain ( & cookie - > iovad ) ;
list_for_each_entry_safe ( msi , tmp , & cookie - > msi_page_list , list ) {
list_del ( & msi - > list ) ;
kfree ( msi ) ;
}
kfree ( cookie ) ;
2015-10-01 22:13:58 +03:00
domain - > iova_cookie = NULL ;
}
EXPORT_SYMBOL ( iommu_put_dma_cookie ) ;
2017-03-16 20:00:19 +03:00
/**
* iommu_dma_get_resv_regions - Reserved region driver helper
* @ dev : Device from iommu_get_resv_regions ( )
* @ list : Reserved region list from iommu_get_resv_regions ( )
*
* IOMMU drivers can use this to implement their . get_resv_regions callback
2018-04-18 14:40:42 +03:00
* for general non - IOMMU - specific reservations . Currently , this covers GICv3
* ITS region reservation on ACPI based ARM platforms that may require HW MSI
* reservation .
2017-03-16 20:00:19 +03:00
*/
void iommu_dma_get_resv_regions ( struct device * dev , struct list_head * list )
2016-09-12 19:14:00 +03:00
{
2018-11-29 16:01:00 +03:00
if ( ! is_of_node ( dev_iommu_fwspec_get ( dev ) - > iommu_fwnode ) )
2018-04-18 14:40:42 +03:00
iort_iommu_msi_get_resv_regions ( dev , list ) ;
2017-03-16 20:00:19 +03:00
2016-09-12 19:14:00 +03:00
}
2017-03-16 20:00:19 +03:00
EXPORT_SYMBOL ( iommu_dma_get_resv_regions ) ;
2016-09-12 19:14:00 +03:00
2017-03-16 20:00:18 +03:00
/*
 * Pre-populate the cookie's MSI page list with one entry per IOVA granule
 * covering [start, end), each identity-mapped (iova == phys), so that a
 * HW-managed MSI region stays exactly where the hardware expects it.
 */
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}
	return 0;
}
static int iova_reserve_pci_windows ( struct pci_dev * dev ,
2018-04-18 14:40:42 +03:00
struct iova_domain * iovad )
{
struct pci_host_bridge * bridge = pci_find_host_bridge ( dev - > bus ) ;
struct resource_entry * window ;
unsigned long lo , hi ;
2019-05-03 17:05:33 +03:00
phys_addr_t start = 0 , end ;
2018-04-18 14:40:42 +03:00
resource_list_for_each_entry ( window , & bridge - > windows ) {
if ( resource_type ( window - > res ) ! = IORESOURCE_MEM )
continue ;
lo = iova_pfn ( iovad , window - > res - > start - window - > offset ) ;
hi = iova_pfn ( iovad , window - > res - > end - window - > offset ) ;
reserve_iova ( iovad , lo , hi ) ;
}
2019-05-03 17:05:33 +03:00
/* Get reserved DMA windows from host bridge */
resource_list_for_each_entry ( window , & bridge - > dma_ranges ) {
end = window - > res - > start - window - > offset ;
resv_iova :
if ( end > start ) {
lo = iova_pfn ( iovad , start ) ;
hi = iova_pfn ( iovad , end ) ;
reserve_iova ( iovad , lo , hi ) ;
} else {
/* dma_ranges list should be sorted */
dev_err ( & dev - > dev , " Failed to reserve IOVA \n " ) ;
return - EINVAL ;
}
start = window - > res - > end - window - > offset + 1 ;
/* If window is last entry */
if ( window - > node . next = = & bridge - > dma_ranges & &
end ! = ~ ( dma_addr_t ) 0 ) {
end = ~ ( dma_addr_t ) 0 ;
goto resv_iova ;
}
}
return 0 ;
2018-04-18 14:40:42 +03:00
}
2017-03-16 20:00:18 +03:00
static int iova_reserve_iommu_regions ( struct device * dev ,
struct iommu_domain * domain )
{
struct iommu_dma_cookie * cookie = domain - > iova_cookie ;
struct iova_domain * iovad = & cookie - > iovad ;
struct iommu_resv_region * region ;
LIST_HEAD ( resv_regions ) ;
int ret = 0 ;
2019-05-03 17:05:33 +03:00
if ( dev_is_pci ( dev ) ) {
ret = iova_reserve_pci_windows ( to_pci_dev ( dev ) , iovad ) ;
if ( ret )
return ret ;
}
2018-04-18 14:40:42 +03:00
2017-03-16 20:00:18 +03:00
iommu_get_resv_regions ( dev , & resv_regions ) ;
list_for_each_entry ( region , & resv_regions , list ) {
unsigned long lo , hi ;
/* We ARE the software that manages these! */
if ( region - > type = = IOMMU_RESV_SW_MSI )
continue ;
lo = iova_pfn ( iovad , region - > start ) ;
hi = iova_pfn ( iovad , region - > start + region - > length - 1 ) ;
reserve_iova ( iovad , lo , hi ) ;
if ( region - > type = = IOMMU_RESV_MSI )
ret = cookie_init_hw_msi_region ( cookie , region - > start ,
region - > start + region - > length ) ;
if ( ret )
break ;
}
iommu_put_resv_regions ( dev , & resv_regions ) ;
return ret ;
}
2018-09-20 19:10:22 +03:00
static void iommu_dma_flush_iotlb_all ( struct iova_domain * iovad )
{
struct iommu_dma_cookie * cookie ;
struct iommu_domain * domain ;
cookie = container_of ( iovad , struct iommu_dma_cookie , iovad ) ;
domain = cookie - > fq_domain ;
/*
* The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
* implies that ops - > flush_iotlb_all must be non - NULL .
*/
domain - > ops - > flush_iotlb_all ( domain ) ;
}
2015-10-01 22:13:58 +03:00
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @ domain : IOMMU domain previously prepared by iommu_get_dma_cookie ( )
* @ base : IOVA at which the mappable address space starts
* @ size : Size of IOVA space
2016-09-12 19:14:00 +03:00
* @ dev : Device the domain is being initialised for
2015-10-01 22:13:58 +03:00
*
* @ base and @ size should be exact multiples of IOMMU page granularity to
* avoid rounding surprises . If necessary , we reserve the page at address 0
* to ensure it is an invalid IOVA . It is safe to reinitialise a domain , but
* any change which could make prior IOVAs invalid will fail .
*/
2016-09-12 19:14:00 +03:00
int iommu_dma_init_domain ( struct iommu_domain * domain , dma_addr_t base ,
u64 size , struct device * dev )
2015-10-01 22:13:58 +03:00
{
2017-01-19 23:57:46 +03:00
struct iommu_dma_cookie * cookie = domain - > iova_cookie ;
struct iova_domain * iovad = & cookie - > iovad ;
2019-01-24 10:10:02 +03:00
unsigned long order , base_pfn ;
2018-09-20 19:10:22 +03:00
int attr ;
2015-10-01 22:13:58 +03:00
2017-01-19 23:57:46 +03:00
if ( ! cookie | | cookie - > type ! = IOMMU_DMA_IOVA_COOKIE )
return - EINVAL ;
2015-10-01 22:13:58 +03:00
/* Use the smallest supported page size for IOVA granularity */
2016-04-07 20:42:06 +03:00
order = __ffs ( domain - > pgsize_bitmap ) ;
2015-10-01 22:13:58 +03:00
base_pfn = max_t ( unsigned long , 1 , base > > order ) ;
/* Check the domain allows at least some access to the device... */
if ( domain - > geometry . force_aperture ) {
if ( base > domain - > geometry . aperture_end | |
base + size < = domain - > geometry . aperture_start ) {
pr_warn ( " specified DMA range outside IOMMU capability \n " ) ;
return - EFAULT ;
}
/* ...then finally give it a kicking to make sure it fits */
base_pfn = max_t ( unsigned long , base_pfn ,
domain - > geometry . aperture_start > > order ) ;
}
2017-01-16 16:24:54 +03:00
/* start_pfn is always nonzero for an already-initialised domain */
2015-10-01 22:13:58 +03:00
if ( iovad - > start_pfn ) {
if ( 1UL < < order ! = iovad - > granule | |
2017-01-16 16:24:54 +03:00
base_pfn ! = iovad - > start_pfn ) {
2015-10-01 22:13:58 +03:00
pr_warn ( " Incompatible range for DMA domain \n " ) ;
return - EFAULT ;
}
2017-03-16 20:00:18 +03:00
return 0 ;
2015-10-01 22:13:58 +03:00
}
2017-03-16 20:00:18 +03:00
2017-09-21 18:52:45 +03:00
init_iova_domain ( iovad , 1UL < < order , base_pfn ) ;
2018-09-20 19:10:22 +03:00
if ( ! cookie - > fq_domain & & ! iommu_domain_get_attr ( domain ,
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE , & attr ) & & attr ) {
cookie - > fq_domain = domain ;
init_iova_flush_queue ( iovad , iommu_dma_flush_iotlb_all , NULL ) ;
}
2017-03-16 20:00:18 +03:00
if ( ! dev )
return 0 ;
return iova_reserve_iommu_regions ( dev , domain ) ;
2015-10-01 22:13:58 +03:00
}
EXPORT_SYMBOL ( iommu_dma_init_domain ) ;
/**
2017-01-06 16:28:12 +03:00
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags .
2015-10-01 22:13:58 +03:00
* @ dir : Direction of DMA transfer
* @ coherent : Is the DMA master cache - coherent ?
2017-01-06 16:28:12 +03:00
* @ attrs : DMA attributes for the mapping
2015-10-01 22:13:58 +03:00
*
* Return : corresponding IOMMU API page protection flags
*/
2017-01-06 16:28:12 +03:00
int dma_info_to_prot ( enum dma_data_direction dir , bool coherent ,
unsigned long attrs )
2015-10-01 22:13:58 +03:00
{
int prot = coherent ? IOMMU_CACHE : 0 ;
2017-01-06 16:28:12 +03:00
if ( attrs & DMA_ATTR_PRIVILEGED )
prot | = IOMMU_PRIV ;
2015-10-01 22:13:58 +03:00
switch ( dir ) {
case DMA_BIDIRECTIONAL :
return prot | IOMMU_READ | IOMMU_WRITE ;
case DMA_TO_DEVICE :
return prot | IOMMU_READ ;
case DMA_FROM_DEVICE :
return prot | IOMMU_WRITE ;
default :
return 0 ;
}
}
2017-03-31 17:46:05 +03:00
/*
 * Allocate @size bytes of IOVA space below @dma_limit for @dev.
 * Returns 0 on failure (the first granule is never handed out).
 */
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
/* Return IOVA space to the allocator (or queue it, in non-strict mode). */
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
/* Unmap a granule-aligned region around [dma_addr, +size) and free its IOVA. */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	/* With a flush queue, the TLB sync is deferred to the timer/callback */
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}
/* Free @count 0-order pages and the kvmalloc'ed array describing them. */
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
/*
 * Allocate @count pages for an IOMMU-backed buffer, preferring the orders
 * permitted by @order_mask (to give the IOMMU a chance at large pages) but
 * degrading gracefully to smaller orders under memory pressure. Compound
 * pages are split so the result is always an array of 0-order pages.
 *
 * Returns a kvmalloc'ed array of @count page pointers, or NULL on failure.
 */
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	/* kvcalloc() checks count * sizeof(*pages) for overflow */
	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_MAPPING_ERROR;
}
/**
* iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
* @ dev : Device to allocate memory for . Must be a real device
* attached to an iommu_dma_domain
* @ size : Size of buffer in bytes
* @ gfp : Allocation flags
2016-04-13 19:29:10 +03:00
* @ attrs : DMA attributes for this allocation
2015-10-01 22:13:58 +03:00
* @ prot : IOMMU mapping flags
* @ handle : Out argument for allocated DMA handle
* @ flush_page : Arch callback which must ensure PAGE_SIZE bytes from the
* given VA / PA are visible to the given non - coherent device .
*
* If @ size is less than PAGE_SIZE , then a full CPU page will be allocated ,
* but an IOMMU which supports smaller pages might not map the whole thing .
*
* Return : Array of struct page pointers describing the buffer ,
* or NULL on failure .
*/
2016-04-13 19:29:10 +03:00
struct page * * iommu_dma_alloc ( struct device * dev , size_t size , gfp_t gfp ,
2016-08-03 23:46:00 +03:00
unsigned long attrs , int prot , dma_addr_t * handle ,
2015-10-01 22:13:58 +03:00
void ( * flush_page ) ( struct device * , const void * , phys_addr_t ) )
{
2018-09-12 18:24:13 +03:00
struct iommu_domain * domain = iommu_get_dma_domain ( dev ) ;
2017-03-31 17:46:05 +03:00
struct iommu_dma_cookie * cookie = domain - > iova_cookie ;
struct iova_domain * iovad = & cookie - > iovad ;
2015-10-01 22:13:58 +03:00
struct page * * pages ;
struct sg_table sgt ;
2017-03-31 17:46:05 +03:00
dma_addr_t iova ;
2016-04-13 19:29:10 +03:00
unsigned int count , min_size , alloc_sizes = domain - > pgsize_bitmap ;
2015-10-01 22:13:58 +03:00
2018-11-21 21:35:19 +03:00
* handle = DMA_MAPPING_ERROR ;
2015-10-01 22:13:58 +03:00
2016-04-13 19:29:10 +03:00
min_size = alloc_sizes & - alloc_sizes ;
if ( min_size < PAGE_SIZE ) {
min_size = PAGE_SIZE ;
alloc_sizes | = PAGE_SIZE ;
} else {
size = ALIGN ( size , min_size ) ;
}
2016-08-03 23:46:00 +03:00
if ( attrs & DMA_ATTR_ALLOC_SINGLE_PAGES )
2016-04-13 19:29:10 +03:00
alloc_sizes = min_size ;
count = PAGE_ALIGN ( size ) > > PAGE_SHIFT ;
2018-11-30 14:14:00 +03:00
pages = __iommu_dma_alloc_pages ( dev , count , alloc_sizes > > PAGE_SHIFT ,
gfp ) ;
2015-10-01 22:13:58 +03:00
if ( ! pages )
return NULL ;
2017-03-31 17:46:05 +03:00
size = iova_align ( iovad , size ) ;
iova = iommu_dma_alloc_iova ( domain , size , dev - > coherent_dma_mask , dev ) ;
2015-10-01 22:13:58 +03:00
if ( ! iova )
goto out_free_pages ;
if ( sg_alloc_table_from_pages ( & sgt , pages , count , 0 , size , GFP_KERNEL ) )
goto out_free_iova ;
if ( ! ( prot & IOMMU_CACHE ) ) {
struct sg_mapping_iter miter ;
/*
* The CPU - centric flushing implied by SG_MITER_TO_SG isn ' t
* sufficient here , so skip it by using the " wrong " direction .
*/
sg_miter_start ( & miter , sgt . sgl , sgt . orig_nents , SG_MITER_FROM_SG ) ;
while ( sg_miter_next ( & miter ) )
flush_page ( dev , miter . addr , page_to_phys ( miter . page ) ) ;
sg_miter_stop ( & miter ) ;
}
2017-03-31 17:46:05 +03:00
if ( iommu_map_sg ( domain , iova , sgt . sgl , sgt . orig_nents , prot )
2015-10-01 22:13:58 +03:00
< size )
goto out_free_sg ;
2017-03-31 17:46:05 +03:00
* handle = iova ;
2015-10-01 22:13:58 +03:00
sg_free_table ( & sgt ) ;
return pages ;
out_free_sg :
sg_free_table ( & sgt ) ;
out_free_iova :
2017-03-31 17:46:05 +03:00
iommu_dma_free_iova ( cookie , iova , size ) ;
2015-10-01 22:13:58 +03:00
out_free_pages :
__iommu_dma_free_pages ( pages , count ) ;
return NULL ;
}
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
static dma_addr_t __iommu_dma_map ( struct device * dev , phys_addr_t phys ,
2018-09-12 18:24:13 +03:00
size_t size , int prot , struct iommu_domain * domain )
2015-10-01 22:13:58 +03:00
{
2017-03-31 17:46:05 +03:00
struct iommu_dma_cookie * cookie = domain - > iova_cookie ;
2017-05-15 18:01:30 +03:00
size_t iova_off = 0 ;
2017-03-31 17:46:05 +03:00
dma_addr_t iova ;
2015-10-01 22:13:58 +03:00
2017-05-15 18:01:30 +03:00
if ( cookie - > type = = IOMMU_DMA_IOVA_COOKIE ) {
iova_off = iova_offset ( & cookie - > iovad , phys ) ;
size = iova_align ( & cookie - > iovad , size + iova_off ) ;
}
2017-03-31 17:46:05 +03:00
iova = iommu_dma_alloc_iova ( domain , size , dma_get_mask ( dev ) , dev ) ;
2015-10-01 22:13:58 +03:00
if ( ! iova )
2018-11-21 21:35:19 +03:00
return DMA_MAPPING_ERROR ;
2015-10-01 22:13:58 +03:00
2017-03-31 17:46:05 +03:00
if ( iommu_map ( domain , iova , phys - iova_off , size , prot ) ) {
iommu_dma_free_iova ( cookie , iova , size ) ;
2018-11-21 21:35:19 +03:00
return DMA_MAPPING_ERROR ;
2015-10-01 22:13:58 +03:00
}
2017-03-31 17:46:05 +03:00
return iova + iova_off ;
2015-10-01 22:13:58 +03:00
}
2016-11-14 15:16:26 +03:00
/* DMA-API entry point: map one (possibly offset) page for streaming DMA. */
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
			iommu_get_dma_domain(dev));
}
/* DMA-API entry point: tear down a mapping made by iommu_dma_map_page(). */
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
/*
* The DMA API client is passing in a scatterlist which could describe
* any old buffer layout , but the IOMMU API requires everything to be
* aligned to IOMMU pages . Hence the need for this complicated bit of
* impedance - matching , to be able to hand off a suitably - aligned list ,
* but still preserve the original offsets and sizes for the caller .
*/
int iommu_dma_map_sg ( struct device * dev , struct scatterlist * sg ,
int nents , int prot )
{
2018-09-12 18:24:13 +03:00
struct iommu_domain * domain = iommu_get_dma_domain ( dev ) ;
2017-03-31 17:46:05 +03:00
struct iommu_dma_cookie * cookie = domain - > iova_cookie ;
struct iova_domain * iovad = & cookie - > iovad ;
2015-10-01 22:13:58 +03:00
struct scatterlist * s , * prev = NULL ;
2017-03-31 17:46:05 +03:00
dma_addr_t iova ;
2015-10-01 22:13:58 +03:00
size_t iova_len = 0 ;
2016-04-11 14:32:31 +03:00
unsigned long mask = dma_get_seg_boundary ( dev ) ;
2015-10-01 22:13:58 +03:00
int i ;
/*
* Work out how much IOVA space we need , and align the segments to
* IOVA granules for the IOMMU driver to handle . With some clever
* trickery we can modify the list in - place , but reversibly , by
2016-04-11 14:32:31 +03:00
* stashing the unaligned parts in the as - yet - unused DMA fields .
2015-10-01 22:13:58 +03:00
*/
for_each_sg ( sg , s , nents , i ) {
2016-04-11 14:32:31 +03:00
size_t s_iova_off = iova_offset ( iovad , s - > offset ) ;
2015-10-01 22:13:58 +03:00
size_t s_length = s - > length ;
2016-04-11 14:32:31 +03:00
size_t pad_len = ( mask - iova_len + 1 ) & mask ;
2015-10-01 22:13:58 +03:00
2016-04-11 14:32:31 +03:00
sg_dma_address ( s ) = s_iova_off ;
2015-10-01 22:13:58 +03:00
sg_dma_len ( s ) = s_length ;
2016-04-11 14:32:31 +03:00
s - > offset - = s_iova_off ;
s_length = iova_align ( iovad , s_length + s_iova_off ) ;
2015-10-01 22:13:58 +03:00
s - > length = s_length ;
/*
2016-04-11 14:32:31 +03:00
* Due to the alignment of our single IOVA allocation , we can
* depend on these assumptions about the segment boundary mask :
* - If mask size > = IOVA size , then the IOVA range cannot
* possibly fall across a boundary , so we don ' t care .
* - If mask size < IOVA size , then the IOVA range must start
* exactly on a boundary , therefore we can lay things out
* based purely on segment lengths without needing to know
* the actual addresses beforehand .
* - The mask must be a power of 2 , so pad_len = = 0 if
* iova_len = = 0 , thus we cannot dereference prev the first
* time through here ( i . e . before it has a meaningful value ) .
2015-10-01 22:13:58 +03:00
*/
2016-04-11 14:32:31 +03:00
if ( pad_len & & pad_len < s_length - 1 ) {
2015-10-01 22:13:58 +03:00
prev - > length + = pad_len ;
iova_len + = pad_len ;
}
iova_len + = s_length ;
prev = s ;
}
2017-03-31 17:46:05 +03:00
iova = iommu_dma_alloc_iova ( domain , iova_len , dma_get_mask ( dev ) , dev ) ;
2015-10-01 22:13:58 +03:00
if ( ! iova )
goto out_restore_sg ;
/*
* We ' ll leave any physical concatenation to the IOMMU driver ' s
* implementation - it knows better than we do .
*/
2017-03-31 17:46:05 +03:00
if ( iommu_map_sg ( domain , iova , sg , nents , prot ) < iova_len )
2015-10-01 22:13:58 +03:00
goto out_free_iova ;
2017-03-31 17:46:05 +03:00
return __finalise_sg ( dev , sg , nents , iova ) ;
2015-10-01 22:13:58 +03:00
out_free_iova :
2017-03-31 17:46:05 +03:00
iommu_dma_free_iova ( cookie , iova , iova_len ) ;
2015-10-01 22:13:58 +03:00
out_restore_sg :
__invalidate_sg ( sg , nents ) ;
return 0 ;
}
void iommu_dma_unmap_sg ( struct device * dev , struct scatterlist * sg , int nents ,
2016-08-03 23:46:00 +03:00
enum dma_data_direction dir , unsigned long attrs )
2015-10-01 22:13:58 +03:00
{
2017-03-31 17:46:05 +03:00
dma_addr_t start , end ;
struct scatterlist * tmp ;
int i ;
2015-10-01 22:13:58 +03:00
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation , so this is incredibly easy .
*/
2017-03-31 17:46:05 +03:00
start = sg_dma_address ( sg ) ;
for_each_sg ( sg_next ( sg ) , tmp , nents - 1 , i ) {
if ( sg_dma_len ( tmp ) = = 0 )
break ;
sg = tmp ;
}
end = sg_dma_address ( sg ) + sg_dma_len ( sg ) ;
2018-09-12 18:24:13 +03:00
__iommu_dma_unmap ( iommu_get_dma_domain ( dev ) , start , end - start ) ;
2015-10-01 22:13:58 +03:00
}
2016-11-14 15:16:26 +03:00
/*
 * Map a physical MMIO resource for DMA: same as a page mapping, but
 * flagged IOMMU_MMIO so the IOMMU driver can apply device-memory
 * attributes.
 */
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	int prot = dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO;

	return __iommu_dma_map(dev, phys, size, prot,
			iommu_get_dma_domain(dev));
}
/* Release a mapping previously set up by iommu_dma_map_resource(). */
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	__iommu_dma_unmap(domain, handle, size);
}
2016-09-12 19:13:59 +03:00
/*
 * Look up (or lazily create) the remapped MSI doorbell page for @msi_addr
 * within @domain's cookie. Returns NULL on allocation or mapping failure.
 *
 * Called under cookie->msi_lock with IRQs disabled (see
 * iommu_dma_prepare_msi()), hence the GFP_ATOMIC allocation.
 */
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *page;
	size_t granule = cookie_msi_granule(cookie);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	dma_addr_t iova;

	/* Round down so doorbells in the same granule share one mapping */
	msi_addr &= ~(phys_addr_t)(granule - 1);

	/* Reuse an existing mapping of this doorbell if we have one */
	list_for_each_entry(page, &cookie->msi_page_list, list) {
		if (page->phys == msi_addr)
			return page;
	}

	page = kzalloc(sizeof(*page), GFP_ATOMIC);
	if (!page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, granule, prot, domain);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	page->phys = msi_addr;
	page->iova = iova;
	INIT_LIST_HEAD(&page->list);
	list_add(&page->list, &cookie->msi_page_list);
	return page;

out_free_page:
	kfree(page);
	return NULL;
}
2019-05-01 16:58:19 +03:00
/*
 * Set up the IOMMU-side state for an MSI descriptor: stash the remapped
 * doorbell page in the descriptor's iommu cookie so that
 * iommu_dma_compose_msi_msg() can later rewrite the message address.
 * Returns 0 on success (including the no-IOMMU-translation case, where
 * the cookie is cleared), or -ENOMEM if the doorbell page could not be
 * obtained.
 */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *page;
	struct iommu_dma_cookie *cookie;
	unsigned long flags;

	/* No translation in effect: nothing to prepare, and that's fine */
	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, page);

	return page ? 0 : -ENOMEM;
}
void iommu_dma_compose_msi_msg ( struct msi_desc * desc ,
struct msi_msg * msg )
{
struct device * dev = msi_desc_to_dev ( desc ) ;
const struct iommu_domain * domain = iommu_get_domain_for_dev ( dev ) ;
const struct iommu_dma_msi_page * msi_page ;
msi_page = msi_desc_get_iommu_cookie ( desc ) ;
if ( ! domain | | ! domain - > iova_cookie | | WARN_ON ( ! msi_page ) )
return ;
msg - > address_hi = upper_32_bits ( msi_page - > iova ) ;
msg - > address_lo & = cookie_msi_granule ( domain - > iova_cookie ) - 1 ;
msg - > address_lo + = lower_32_bits ( msi_page - > iova ) ;
2016-09-12 19:13:59 +03:00
}