#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
/*
 * See Documentation/DMA-API.txt for the description of how the
 * following DMA API should work.
 */
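
/*
 * Note: the "noncoherent" DMA allocators below are simply aliases for the
 * coherent ones on this implementation.
 */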
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;
void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
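
/*
 * Example (hypothetical driver code, not part of this header): allocating
 * and releasing a coherent buffer.  "mydev" and BUF_SIZE are assumed names.
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&mydev->dev, BUF_SIZE, &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... tell the device about bus_addr, access the buffer via cpu_addr ...
 *	dma_free_coherent(&mydev->dev, BUF_SIZE, cpu_addr, bus_addr);
 */
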
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
				 enum dma_data_direction direction);
static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
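
/*
 * Example (hypothetical driver code, not part of this header): a streaming
 * mapping of a kmalloc'd buffer for a CPU-to-device transfer.  "dev", "buf"
 * and "len" are assumed to come from the caller.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer using "handle" as the bus address ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
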
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
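
/*
 * Example (hypothetical driver code): mapping a scatterlist for a
 * device-to-CPU transfer.  "dev", "sg" and "nents" are assumed; note that
 * dma_unmap_sg() takes the original nents, not the count returned by
 * dma_map_sg().
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for (i = 0; i < count; i++)
 *		... feed sg_dma_address(&sg[i]) and sg_dma_len(&sg[i])
 *		    to the device ...
 *	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */
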
extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction);
static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
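
/*
 * Example (hypothetical driver code): mapping part of a page, e.g. a
 * fragment handed over by a higher layer.  "dev", "page", "offset" and
 * "len" are assumed.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */
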
static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			     enum dma_data_direction direction)
{
}
static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
				enum dma_data_direction direction)
{
	flush_write_buffers();
}
static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
}
static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	flush_write_buffers();
}
static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
}
static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
			    enum dma_data_direction direction)
{
	flush_write_buffers();
}
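
/*
 * On this implementation the *_for_cpu() sync helpers are no-ops and the
 * *_for_device() helpers only drain the CPU write buffers.  Portable
 * drivers should nevertheless follow the usual pattern when handing a
 * streaming mapping back and forth (sketch, names assumed):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now inspect the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the buffer is now owned by the device again ...
 */
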
static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}
static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
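
/*
 * Example (hypothetical driver code): restricting a device to 32-bit bus
 * addresses.  DMA_BIT_MASK() is defined in <linux/dma-mapping.h>.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
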
static inline
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	flush_write_buffers();
}
#endif /* _ASM_DMA_MAPPING_H */