/*
 * include/asm-xtensa/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
/*
 * DMA-consistent mapping functions.
*/
extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void *, size_t, dma_addr_t);
extern void consistent_sync(void *, size_t, int);
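
/*
 * There is no separate non-coherent allocator on this port; the
 * "noncoherent" variants simply fall back to the coherent ones.
 */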
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
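
/*
 * Streaming mappings: there is no IOMMU, so a DMA address is simply the
 * buffer's physical address.  Cache coherence is maintained by hand with
 * consistent_sync() at map time.
 */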
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	consistent_sync(ptr, size, direction);
	return virt_to_phys(ptr);
}
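
/* Nothing to undo at unmap time beyond the direction sanity check. */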
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
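
/*
 * Map a scatterlist: each entry gets its physical address as the DMA
 * address, and its cache lines are synced for the given direction.
 */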
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
		consistent_sync(sg_virt(sg), sg->length, direction);
	}

	return nents;
}
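
/* Map one page: the DMA address is the page's physical address plus offset. */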
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}
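
/* As with dma_unmap_single(), the unmap operations only check the direction. */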
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
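
/*
 * Ownership transfers for streaming mappings: both the for_cpu and
 * for_device variants flush/invalidate the caches over the mapped
 * region, converting the bus address back to a virtual address first.
 */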
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size, direction);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size, direction);
}
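
/* Scatterlist variants: sync every entry of the list. */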
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(sg_virt(sg), sg->length, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(sg_virt(sg), sg->length, dir);
}
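
/*
 * Mappings never fail here: a DMA address is just a physical address,
 * so there is no error value to report.
 */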
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
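
/* Any DMA mask is accepted; dma_set_mask() only records it. */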
static inline int
dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
static inline int
dma_get_cache_alignment(void)
{
	return L1_CACHE_BYTES;
}
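
/*
 * dma_is_consistent() reports all memory as DMA-consistent;
 * dma_cache_sync() flushes the caches with consistent_sync().
 */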
#define dma_is_consistent(d, h)	(1)
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	consistent_sync(vaddr, size, direction);
}
#endif	/* _XTENSA_DMA_MAPPING_H */