/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc,
 * covering the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
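
/*
 * Illustrative sketch only: these low-level helpers back the generic
 * dma_alloc_coherent()/dma_map_*() wrappers further down; a direct caller
 * on a non-snooping core would pair them roughly like this (the buffer
 * name and size are made up for the example):
 *
 *	dma_addr_t handle;
 *	void *buf = __dma_alloc_coherent(4096, &handle, GFP_KERNEL);
 *	if (buf) {
 *		... fill buf, hand "handle" to the device ...
 *		__dma_free_coherent(4096, buf);
 *	}
 *
 * For a streaming (cacheable) buffer, __dma_sync(ptr, len, direction) must
 * be called before the device reads it or after the device has written it.
 */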

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *dev, void *ptr,
				 size_t size, enum dma_data_direction direction);
	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
			     size_t size, enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 dma_mask);
};
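
/*
 * Sketch only: a hypothetical bus implementation would fill in its own
 * table and install it on its devices, roughly along these lines (the
 * "mybus_*" callbacks are invented names, not real kernel symbols):
 *
 *	static struct dma_mapping_ops mybus_dma_ops = {
 *		.alloc_coherent	= mybus_alloc_coherent,
 *		.free_coherent	= mybus_free_coherent,
 *		.map_single	= mybus_map_single,
 *		.unmap_single	= mybus_unmap_single,
 *		.map_sg		= mybus_map_sg,
 *		.unmap_sg	= mybus_unmap_sg,
 *	};
 *
 *	set_dma_ops(dev, &mybus_dma_ops);	(at device discovery time)
 */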

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
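
/*
 * Sketch only: a PCI driver would typically negotiate its mask at probe
 * time, before doing any mapping ("pdev" and the 32-bit literal are just
 * an example):
 *
 *	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
 *		return -EIO;
 */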

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
				   direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;
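
/*
 * Sketch only: platform code picks one of these tables per bus and
 * attaches it while setting a device up, e.g. ("pdev" is illustrative):
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *
 * After that, the generic wrappers above dispatch every call through
 * dev->archdata.dma_ops.
 */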

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
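
/*
 * Sketch only: letting the CPU look at a streaming buffer while it stays
 * mapped ("busaddr" and "len" are illustrative):
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... CPU reads the data the device just wrote ...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 */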

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
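
/*
 * Sketch only: typical driver-side use of the streaming API with the
 * error check ("skb->data" and "len" stand in for any kernel buffer and
 * length; this is not code from this file):
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(busaddr))
 *		return -ENOMEM;
 *	... point the device at busaddr, wait for it to finish ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */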

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
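
/*
 * Sketch only: a driver can round a shared buffer up to this alignment so
 * that unrelated data never sits in the same cache line as the DMA region
 * ("len" is an arbitrary example size):
 *
 *	size_t aligned = ALIGN(len, dma_get_cache_alignment());
 *	buf = kmalloc(aligned, GFP_KERNEL);
 */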

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */