#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bug.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
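/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * allocate a coherent descriptor ring and free it again.  The names
 * "dev", "ring" and RING_BYTES are illustrative only.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */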
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);
	flush_write_buffers();
	return virt_to_phys(ptr);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
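/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * stream a kmalloc()ed buffer to the device and unmap it afterwards.
 * "dev", "buf" and "len" are illustrative only.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... program the device with "handle", wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */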
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
	}

	flush_write_buffers();
	return nents;
}
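/*
 * Usage sketch (hypothetical driver code): map a scatterlist and walk
 * the mapped entries with for_each_sg().  "dev", "sglist", "nents" and
 * program_device() are illustrative only; note that unmapping uses the
 * original nents, not the value returned by dma_map_sg().
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	for_each_sg(sglist, sg, count, i)
 *		program_device(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */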
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
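/*
 * i386 DMA is cache-coherent, so syncing a mapping back to the CPU is a
 * no-op; syncing it for the device only needs to flush the CPU write
 * buffers so that pending stores become visible before the device reads.
 */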
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}
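/*
 * Mapping on i386 is a simple physical-address translation and cannot
 * fail, so dma_mapping_error() always reports success.
 */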
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}
extern int forbid_dac;
static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA provides.
	 */
	if (mask < 0x00ffffff)
		return 0;

	/* Work around chipset bugs */
	if (forbid_dac > 0 && mask > 0xffffffffULL)
		return 0;

	return 1;
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
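/*
 * Usage sketch (hypothetical probe code): restrict the device to 32-bit
 * DMA addressing before mapping anything.  DMA_32BIT_MASK is assumed to
 * come from <linux/dma-mapping.h>.
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */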
static inline int
dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache size on all x86
	 * processors, so return the maximum possible to be safe.
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_is_consistent(d, h)	(1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}
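/*
 * dma_declare_coherent_memory() lets a driver hand a device-local memory
 * region (described by its bus and device addresses) to the DMA API so
 * that dma_alloc_coherent() for that device can be satisfied from it;
 * dma_mark_declared_memory_occupied() reserves a specific range within
 * the declared region, and dma_release_declared_memory() gives it back.
 */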
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif