#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

/*
 * Map a kernel-virtual buffer for streaming DMA and return the bus
 * address that the device should use.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * Map a scatter/gather list for streaming DMA; returns the number of
 * DMA segments actually mapped (which may be smaller than nents).
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

/*
 * Hand ownership of a streaming mapping back to the CPU so that the
 * buffer contents can safely be read or modified.
 */
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

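/*
 * Illustrative sketch (not part of this header): typical streaming use of
 * the wrappers above from a driver.  "dev", "buf" and "len" are
 * hypothetical driver-owned names, and dma_mapping_error() is assumed to
 * be provided by the architecture's dma-mapping.h.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand "handle" to the device and wait for the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
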
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);

	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

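/*
 * Illustrative sketch (not part of this header): a driver's mmap file
 * operation might forward to dma_mmap_coherent() like this.  "my_dev",
 * "my_cpu_addr", "my_dma_handle" and "my_size" are hypothetical names for
 * a buffer previously obtained from dma_alloc_coherent():
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_dma_handle, my_size);
 *	}
 *
 * As the kernel-doc for dma_mmap_attrs() notes, the buffer must stay
 * allocated until the user space mapping has been released.
 */
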
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

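/*
 * Illustrative sketch (not part of this header): exporting a coherent
 * allocation as a scatterlist table, e.g. for sharing with another
 * device.  "my_dev", "my_cpu_addr", "my_dma_handle" and "my_size" are
 * hypothetical names; the caller owns the resulting table and releases
 * it with sg_free_table() when done.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(my_dev, &sgt, my_cpu_addr, my_dma_handle,
 *			      my_size);
 *	if (ret)
 *		return ret;
 */
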
#endif