/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

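/*
 * "Coherent" memory on a non-coherent MIPS system is handed out through an
 * uncached mapping: the freshly allocated pages are flushed from the caches
 * and the cached virtual address is converted to its uncached alias with
 * UNCAC_ADDR().
 */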
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

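/*
 * Illustrative driver usage of the coherent API (device and variable names
 * are hypothetical); because the returned buffer is an uncached mapping, no
 * explicit sync calls are needed while it is in use:
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
 *	if (cpu_addr) {
 *		... program the device with bus_addr, run the transfer ...
 *		dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
 *	}
 */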
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

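/*
 * Cache maintenance for streaming DMA: write back dirty lines for data headed
 * to the device, invalidate for data coming from the device, and do both for
 * bidirectional buffers.
 */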
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

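/*
 * Streaming mappings: the DMA handle is simply the buffer's physical address
 * (virt_to_phys()); the real work is the cache maintenance performed by
 * __dma_sync() before the device touches the memory.
 */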
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);

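/*
 * Note that the sync on unmap is currently disabled (see the commented-out
 * __dma_sync() call below); unmapping only computes the CPU address.
 */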
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr;

	addr = dma_addr + PAGE_OFFSET;

	//__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

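/*
 * Streaming usage, as a rough sketch (buffer and length names are
 * hypothetical); the CPU must not touch the buffer between map and unmap
 * without an explicit dma_sync_*() call:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... start the transfer using handle ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */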
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (addr) {
			__dma_sync(addr + sg->offset, sg->length, direction);
			sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
				+ sg->offset;
		}
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

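/*
 * Page-based variant of dma_map_single(): writeback-invalidate the mapped
 * range and return the page's physical address plus the offset.
 */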
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

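/*
 * The dma_sync_*_for_cpu()/_for_device() helpers hand ownership of a
 * streaming buffer back and forth between CPU and device; both directions
 * end up in __dma_sync() with the cache operation chosen by the transfer
 * direction.
 */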
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

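/*
 * Scatterlist variants: each entry's pages are written back or invalidated
 * individually, again according to the transfer direction.
 */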
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long) vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

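/*
 * The DAC sync helpers make no distinction between directions: both the
 * _for_cpu and _for_device variants simply writeback-invalidate the range.
 */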
void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */