#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#define dma_alloc_coherent	platform_dma_alloc_coherent
/* coherent mem. is cheap */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}
#define dma_free_coherent	platform_dma_free_coherent
static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
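/*
 * Illustrative alloc/free pairing, not part of this header; "my_dev" is a
 * hypothetical struct device pointer supplied by the caller:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_noncoherent(my_dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_noncoherent(my_dev, PAGE_SIZE, buf, handle);
 */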
#define dma_map_single		platform_dma_map_single
#define dma_map_sg		platform_dma_map_sg
#define dma_unmap_single	platform_dma_unmap_single
#define dma_unmap_sg		platform_dma_unmap_sg
#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
#define dma_mapping_error	platform_dma_mapping_error

#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)
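/*
 * Sketch of a map/unmap cycle through the macros above; "dev" and "page" are
 * assumed to come from the caller, and the error check uses the
 * single-argument dma_mapping_error() of this era's interface:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_FROM_DEVICE);
 */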
/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
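/*
 * Note that the range variants above drop "offset" entirely: the platform
 * sync hooks operate on the whole mapping, which is sufficient on a
 * cache-coherent architecture such as IA-64.
 */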
#define dma_supported		platform_dma_supported
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
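/*
 * Typical driver usage (sketch; "pdev" is a hypothetical PCI device and the
 * 32-bit mask is just an example value):
 *
 *	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
 *		return -EIO;
 */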
extern int dma_get_cache_alignment(void);
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we
	 * do need to ensure that dma_cache_sync() enforces order, hence
	 * the mb().
	 */
	mb();
}
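/*
 * Per Documentation/DMA-API.txt, dma_cache_sync() pairs with memory obtained
 * from dma_alloc_noncoherent().  A hedged sketch, with "dev", "buf", "len"
 * assumed and the two helpers purely hypothetical:
 *
 *	cpu_fill_buffer(buf, len);			// CPU writes the buffer
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);	// order before DMA
 *	start_device_dma(dev);				// device reads it
 */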
#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
#endif /* _ASM_IA64_DMA_MAPPING_H */