// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head list;
	dma_addr_t iova;
	phys_addr_t phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t msi_iova;
	};
	struct list_head msi_page_list;
	spinlock_t msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain *fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
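
/*
 * Example (an illustrative sketch, not a caller taken from this file): a
 * hypothetical user of an IOMMU_DOMAIN_UNMANAGED domain carving out a window
 * for MSI doorbells before attaching devices. MY_MSI_IOVA_BASE and
 * MY_MSI_IOVA_LENGTH are assumed constants for the example.
 *
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 *	if (ret)
 *		goto out_free_domain;
 *
 * The caller's own IOVA allocator must then keep the range
 * [MY_MSI_IOVA_BASE, MY_MSI_IOVA_BASE + MY_MSI_IOVA_LENGTH) free for the
 * PAGE_SIZE doorbell mappings described above.
 */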

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
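
/*
 * Example (an illustrative sketch, not code from any particular driver): how
 * an IOMMU driver would typically pair iommu_get_dma_cookie() and
 * iommu_put_dma_cookie() in its domain_alloc/domain_free callbacks. The
 * my_domain type and to_my_domain() helper are assumptions for the example.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *md;
 *
 *		if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
 *			return NULL;
 *
 *		md = kzalloc(sizeof(*md), GFP_KERNEL);
 *		if (!md)
 *			return NULL;
 *
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */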

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
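
/*
 * Example (an illustrative sketch modelled on how drivers commonly wire this
 * up, not code from this file): a driver's .get_resv_regions callback that
 * advertises its own software MSI window and then delegates the generic
 * reservations to iommu_dma_get_resv_regions(). MY_MSI_IOVA_BASE and
 * MY_MSI_IOVA_LENGTH are assumed constants.
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_IOVA_BASE,
 *						 MY_MSI_IOVA_LENGTH, prot,
 *						 IOMMU_RESV_SW_MSI);
 *		if (!region)
 *			return;
 *		list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */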

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
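
/*
 * For example, a cache-coherent master mapping a buffer DMA_TO_DEVICE with
 * DMA_ATTR_PRIVILEGED set ends up with IOMMU_READ | IOMMU_CACHE | IOMMU_PRIV,
 * while a non-coherent master doing a plain DMA_FROM_DEVICE mapping gets
 * IOMMU_WRITE only.
 */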

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
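
/*
 * Example (an illustrative sketch of the expected caller, not code from this
 * file): architecture code is expected to call iommu_setup_dma_ops() once the
 * device's DMA window is known, e.g. from its arch_setup_dma_ops()
 * implementation:
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 */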

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
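
/*
 * Example (an illustrative sketch modelled on an MSI irqchip such as the
 * GICv3 ITS, not code from this file): iommu_dma_prepare_msi() is called from
 * the irqchip's sleepable .msi_prepare path, while iommu_dma_compose_msi_msg()
 * rewrites the doorbell address when the message is composed, which may run
 * with IRQs disabled. my_doorbell_addr and my_hwirq() are assumptions.
 *
 *	static int my_msi_prepare(struct irq_domain *domain, struct device *dev,
 *				  int nvec, msi_alloc_info_t *info)
 *	{
 *		return iommu_dma_prepare_msi(info->desc, my_doorbell_addr);
 *	}
 *
 *	static void my_irq_compose_msi_msg(struct irq_data *d,
 *					   struct msi_msg *msg)
 *	{
 *		msg->address_lo = lower_32_bits(my_doorbell_addr);
 *		msg->address_hi = upper_32_bits(my_doorbell_addr);
 *		msg->data = my_hwirq(d);
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 *	}
 */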

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);