#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

extern const struct dma_map_ops arm_dma_ops;
extern const struct dma_map_ops arm_coherent_dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops;
}

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;

	return pfn;
}
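
/*
 * Worked example (illustrative, with made-up numbers, assuming the default
 * __pfn_to_bus() identity mapping): on a platform where a device sees
 * system RAM at bus address 0 while the CPU sees it at physical address
 * 0x80000000, dev->dma_pfn_offset is 0x80000.  A CPU pfn of 0x80100 then
 * maps to bus pfn 0x100, i.e. DMA address 0x00100000; dma_to_pfn()
 * performs the inverse translation.
 */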

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (dev) {
		unsigned long pfn = dma_to_pfn(dev, addr);

		return phys_to_virt(__pfn_to_phys(pfn));
	}

	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       const struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);

/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;

	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;

	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	/* limit is the size of the addressable window; zero means no limit */
	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	/* the whole range [addr, addr + size) must fall within the mask */
	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}
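
/*
 * Worked example (illustrative): for a 24-bit ISA-like mask of 0x00ffffff,
 * limit is 0x1000000 (16MB), so a 32MB request fails the size check, and a
 * 4KB buffer at addr 0x01000000 fails the range check because its top bit
 * lies outside the mask.
 */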

static inline void dma_mark_clean(void *addr, size_t size) { }

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @gfp: GFP flags for the allocation
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA.  This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, unsigned long attrs);
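
/*
 * Usage sketch (illustrative, not part of the original header): drivers
 * reach this hook through the generic DMA API rather than calling
 * arm_dma_alloc() directly, e.g.:
 *
 *	dma_addr_t handle;
 *	void *cpu_addr = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *
 *	if (!cpu_addr)
 *		return -ENOMEM;
 */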

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal while this call is executing and after it returns.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, unsigned long attrs);
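
/*
 * Usage sketch (illustrative): the matching driver-side release goes
 * through dma_free_coherent(), which lands here via the dma_map_ops:
 *
 *	dma_free_coherent(dev, SZ_4K, cpu_addr, handle);
 */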

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);
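
/*
 * Usage sketch (illustrative): from a driver's mmap file operation, the
 * generic wrapper dma_mmap_coherent() ends up here:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, cpu_addr, handle,
 *					 vma->vm_end - vma->vm_start);
 *	}
 *
 * where my_mmap, dev, cpu_addr and handle are hypothetical driver state
 * from an earlier dma_alloc_coherent() call.
 */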

/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256 KiB.  It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
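
/*
 * Usage sketch (hypothetical platform code): a machine needing a larger
 * atomic pool would call this from an early init hook, which runs well
 * before postcore_initcall:
 *
 *	static void __init my_machine_init_early(void)
 *	{
 *		init_dma_coherent_pool_size(SZ_1M);
 *	}
 */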

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
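
/*
 * Usage sketch (hypothetical platform code): register a device whose DMA
 * controller can only reach the first 64MB of the bus.  The callback
 * returns nonzero for any transfer ending above that window:
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, 2048, 65536, my_needs_bounce);
 */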

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, unsigned long attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, unsigned long attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
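
/*
 * Usage sketch (illustrative): drivers call the generic scatterlist API,
 * which dispatches to the arm_dma_* implementations above:
 *
 *	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */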

#endif /* __KERNEL__ */
#endif