/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

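/*
 * Explanatory note (added gloss, not from the original source): the
 * R10000/R12000 are singled out because they execute loads
 * speculatively, so cache lines covering a DMA buffer can be refilled
 * behind the device's back even after the flush done at map time.  The
 * unmap/sync-for-cpu paths below therefore run __dma_sync() again on
 * these CPUs before handing the buffer back to the kernel.
 */
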
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

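/*
 * Worked example (added gloss, derived from the code above): with both
 * CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 enabled, a device whose
 * coherent_dma_mask is DMA_BIT_MASK(30) fails the DMA_BIT_MASK(24)
 * test but passes the DMA_BIT_MASK(32) one, so it is allocated with
 * __GFP_DMA32; a NULL device carries no mask information and
 * pessimistically gets __GFP_DMA; a full 32-bit or wider mask falls
 * through to the empty statement and may be satisfied from any zone.
 */
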
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

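/*
 * Usage sketch (hypothetical driver code; pdev and RING_BYTES are
 * placeholders, not part of this file):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, access ring from the CPU ...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * On a non-coherent platform the returned pointer is the UNCAC_ADDR()
 * alias set up above, so dma_free_coherent() must be passed that same
 * pointer, never the cached alias.
 */
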
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

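/*
 * Note on the direction handling in __dma_sync() below (added gloss):
 * the cache operation is chosen by who writes memory next.
 * DMA_TO_DEVICE writes dirty lines back so the device reads current
 * data; DMA_FROM_DEVICE invalidates so the CPU cannot see stale lines
 * once the device has written; DMA_BIDIRECTIONAL must do both.
 */
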
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

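/*
 * Usage sketch (hypothetical; dev, buf and len are placeholders): a
 * streaming mapping brackets a single transfer,
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program the device with busaddr, wait for completion ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 *
 * and the CPU must not touch buf in between unless it brackets the
 * access with the dma_sync_single_for_cpu()/_for_device() pair below.
 */
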
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size, direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
				(void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

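/*
 * Gloss: after a successful dma_map_sg() the caller programs the
 * device's scatter-gather engine by walking the list with
 * sg_dma_address(sg) and sg_dma_len(sg), and later undoes the mapping
 * with dma_unmap_sg() using the same nents value passed in here, not
 * the return value.
 */
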
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long) vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);