/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>
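
/*
 * The arch_* hooks below are called by the generic DMA code (via
 * <linux/dma-noncoherent.h>) to pick the page protection used for
 * userspace mappings and to perform the CPU cache maintenance needed
 * around transfers to and from non-coherent devices.
 */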
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_map_area(phys_to_virt(paddr), size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(paddr), size, dir);
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	__dma_flush_area(page_address(page), size);
}
#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
#endif /* CONFIG_IOMMU_DMA */
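
/*
 * The atomic pool set up here backs dma_alloc_from_pool() in the
 * non-blocking path of __iommu_alloc_attrs() below: it is allocated
 * from GFP_DMA32 and remapped non-cacheable (PROT_NORMAL_NC) so that
 * non-coherent devices can use it without further cache maintenance.
 */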
static int __init arm64_dma_init(void)
{
	WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
		   TAINT_CPU_OUT_OF_SPEC,
		   "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
		   ARCH_DMA_MINALIGN, cache_line_size());

	return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = dma_alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				dma_free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					get_order(size), gfp & __GFP_NOWARN);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (addr) {
			if (!coherent)
				__dma_flush_area(page_to_virt(page), iosize);
			memset(addr, 0, size);
		} else {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}

	return addr;
}
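
/*
 * Illustrative driver-side usage (not part of this file): once
 * iommu_dma_ops is installed for a device, an ordinary coherent
 * allocation such as
 *
 *	dma_addr_t dma;
 *	void *cpu_addr = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *
 * reaches __iommu_alloc_attrs() via the .alloc hook, and @dma then holds
 * the IOVA that the device must be programmed with.
 */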
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (dma_in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		dma_free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}
static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_device(dev, phys, size, dir);
}
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dev_addr != DMA_MAPPING_ERROR)
		__dma_map_area(page_address(page) + offset, size, dir);

	return dev_addr;
}
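
/*
 * Illustrative streaming-DMA usage (not part of this file): a driver call
 * such as
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 * lands in __iommu_map_page() through the .map_page hook; the
 * __dma_map_area() clean above is what makes the buffer visible to a
 * non-coherent device before the transfer starts.
 */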
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}
static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
};
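
/*
 * These ops are installed per device by __iommu_setup_dma_ops() below,
 * but only when the device ends up in a DMA-type IOMMU domain; otherwise
 * the device keeps the default (SWIOTLB-based) DMA ops.
 */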
static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif /* CONFIG_IOMMU_DMA */
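
/*
 * arch_setup_dma_ops() is normally reached from the firmware bus code
 * (e.g. of_dma_configure() or acpi_dma_configure()) when a device is
 * configured for DMA, with @coherent reflecting the "dma-coherent" DT
 * property or the ACPI _CCA attribute.
 */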
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}