#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
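
/*
 * Example (illustrative sketch only, not part of this header): a driver
 * probe routine would typically set its DMA mask before performing any
 * mappings; "pdev" is a hypothetical platform device.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */
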
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return dma_addr == DMA_ERROR_CODE;
}
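
/*
 * Example (illustrative sketch only, not part of this header): a driver
 * should check every streaming mapping for errors; "buf" and "len" are
 * hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */
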
/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

extern int dma_supported(struct device *dev, u64 mask);

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA.  This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;
	BUG_ON(!ops);

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
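
/*
 * Example (illustrative sketch only, not part of this header): allocating
 * a coherent buffer at probe time; "priv" is a hypothetical driver private
 * structure.
 *
 *	priv->desc = dma_alloc_coherent(dev, PAGE_SIZE, &priv->desc_dma,
 *					GFP_KERNEL);
 *	if (!priv->desc)
 *		return -ENOMEM;
 */
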
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
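
/*
 * Example (illustrative sketch only, not part of this header): the buffer
 * allocated in the previous example would be released on the remove path
 * with the same size and handle.
 *
 *	dma_free_coherent(dev, PAGE_SIZE, priv->desc, priv->desc_dma);
 */
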
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);
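
/*
 * Example (illustrative sketch only, not part of this header): a driver's
 * mmap file operation can hand a previously allocated coherent buffer to
 * user space; dma_mmap_coherent() is assumed here to be provided by the
 * included <asm-generic/dma-mapping-common.h>, which dispatches to
 * arm_dma_mmap() through the dma_map_ops.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(priv->dev, vma, priv->desc,
 *					 priv->desc_dma, PAGE_SIZE);
 *	}
 */
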
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
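
/*
 * Example (illustrative sketch only, not part of this header): write-combined
 * allocations are typically used for frame buffers and similar streamed
 * output memory; the "fb" names are hypothetical.
 *
 *	fb->screen_base = dma_alloc_writecombine(dev, fb->size,
 *						 &fb->screen_dma, GFP_KERNEL);
 *	...
 *	dma_free_writecombine(dev, fb->size, fb->screen_base, fb->screen_dma);
 */
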
/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256 KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
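
/*
 * Example (illustrative sketch only, not part of this header): a board or
 * SoC file needing a larger atomic pool could enlarge it from an early init
 * hook, before postcore_initcall runs; the 1 MiB figure is arbitrary.
 *
 *	static void __init foo_init_early(void)
 *	{
 *		init_dma_coherent_pool_size(SZ_1M);
 *	}
 */
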
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
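
/*
 * Example (illustrative sketch only, not part of this header): platform code
 * registering a device whose DMA window covers only the first 64MB of RAM;
 * the helper name and limit are hypothetical.
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, 2048, 4096, foo_needs_bounce);
 */
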
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);
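
/*
 * Example (illustrative sketch only, not part of this header): drivers use
 * the generic dma_map_sg()/dma_unmap_sg() wrappers, which dispatch to the
 * arm_dma_*_sg implementations above through the dma_map_ops; "sgl" and
 * "nents" are hypothetical.
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
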
#endif /* __KERNEL__ */
#endif