#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
					 void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				      size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
					size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
						     dma_addr_t dma_handle,
						     unsigned long offset,
						     size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
							dma_addr_t dma_handle,
							unsigned long offset,
							size_t size,
							int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
					   struct scatterlist *sg, int nelems,
					   int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
					      struct scatterlist *sg,
					      int nelems, int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				  int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				    struct scatterlist *sg, int nents,
				    int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};
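
/*
 * Sketch of how a backend might fill in this structure.  The nommu_*
 * names below are illustrative only (the in-tree backends live in
 * arch/x86/kernel/, e.g. pci-nommu.c); a 1:1, no-IOMMU implementation
 * could look roughly like:
 *
 *	static dma_addr_t nommu_map_single(struct device *hwdev,
 *					   phys_addr_t paddr, size_t size,
 *					   int direction)
 *	{
 *		return (dma_addr_t)paddr;	// identity mapping
 *	}
 *
 *	static struct dma_mapping_ops nommu_dma_ops = {
 *		.map_single	= nommu_map_single,
 *		.is_phys	= 1,
 *	};
 *
 * dma_ops (or dev->archdata.dma_ops on 64-bit) is then pointed at the
 * structure, and the inline wrappers below dispatch through it.
 */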

extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif /* CONFIG_X86_32 */
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}
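
/*
 * Typical use of the single-buffer streaming API (sketch only; "mydev"
 * and "buf" are hypothetical):
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(&mydev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&mydev->dev, dma))
 *		return -ENOMEM;
 *	// ... hand "dma" to the hardware and wait for completion ...
 *	dma_unmap_single(&mydev->dev, dma, len, DMA_TO_DEVICE);
 */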

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
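
/*
 * Scatter-gather mapping follows the same pattern (sketch; "sgl" and
 * "nents" are hypothetical).  The backend may coalesce entries, so the
 * returned count is what gets programmed into the device, while the
 * original nents is passed back to dma_unmap_sg():
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	// ... program the device with "mapped" entries ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */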

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
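
/*
 * The sync helpers let the CPU look at (or refill) a buffer that stays
 * mapped across several DMA operations.  Sketch, with hypothetical
 * names:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... CPU may now read the data the device wrote ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... buffer belongs to the device again ...
 */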

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
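
/*
 * Roughly, the coherent mask picks the allocation zone:
 *
 *	mask <= DMA_24BIT_MASK			-> GFP_DMA   (ISA, first 16MB)
 *	mask <= DMA_32BIT_MASK (64-bit only)	-> GFP_DMA32 (first 4GB)
 *	otherwise				-> no zone restriction
 */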

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
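
/*
 * Coherent allocations are typically used for long-lived descriptor
 * rings.  Sketch (hypothetical names, may not sleep with GFP_ATOMIC):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ring_dma is what the device sees, ring is the CPU's view
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */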

#endif /* _ASM_X86_DMA_MAPPING_H */