#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	void *caddr;

	caddr = ops->alloc_coherent(dev, size, daddr, gfp);
	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
	return caddr;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);

	debug_dma_free_coherent(dev, size, caddr, daddr);
	ops->free_coherent(dev, size, caddr, daddr);
}
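
/*
 * Illustrative usage only (not part of this header): a driver would
 * typically pair the two helpers above as follows.  The "pdev" pointer
 * and the PAGE_SIZE allocation are hypothetical.
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus_addr,
 *				      GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu_addr, bus_addr);
 */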

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#define get_dma_ops(dev) platform_dma_get_ops(dev)

#include <asm-generic/dma-mapping-common.h>

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);

	return ops->mapping_error(dev, daddr);
}
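
/*
 * Illustrative usage only: dma_mapping_error() should be checked after
 * every streaming mapping, e.g. one made with dma_map_single() from the
 * common code included above.  "dev", "buf" and "len" are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */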

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);

	return ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
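
/*
 * Illustrative usage only: a driver would normally negotiate its mask
 * once at probe time, falling back to a narrower mask when the wider
 * one is not supported.  "pdev" is hypothetical.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */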

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	/*
	 * Compare against the last addressable byte so that the check
	 * cannot be defeated by addr + size wrapping around.
	 */
	return addr + size - 1 <= *dev->dma_mask;
}

/*
 * On ia64 the DMA (bus) address space maps 1:1 onto the physical
 * address space, so these conversions are identity functions.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}
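
/*
 * Illustrative usage only: after the CPU writes into a buffer obtained
 * from dma_alloc_noncoherent(), a driver would call dma_cache_sync()
 * before handing the buffer to the device.  "dev", "vaddr", "data" and
 * "len" are hypothetical.
 *
 *	memcpy(vaddr, data, len);
 *	dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);
 */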

#endif /* _ASM_IA64_DMA_MAPPING_H */