/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses and busses using the iommu infrastructure
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

/*
 * Generic iommu implementation
 */

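/* Return the DMA mask to use when allocating iommu space for a device,
 * falling back to 32-bit addressing when no mask has been set.
 */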
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
				    device_to_mask(dev), flag,
				    dev->archdata.numa_node);
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
				       size_t size,
				       enum dma_data_direction direction)
{
	return iommu_map_single(dev->archdata.dma_data, vaddr, size,
				device_to_mask(dev), direction);
}

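/* Tears down the TCEs created by dma_iommu_map_single for the given
 * DMA handle and size.
 */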
static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size,
				   enum dma_data_direction direction)
{
	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
}

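/* Maps a scatter/gather list through the iommu; returns the number of
 * DMA segments produced.
 */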
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
			    device_to_mask(dev), direction);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction)
{
	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = dev->archdata.dma_data;

	if (!tbl || tbl->it_offset > mask) {
		printk(KERN_INFO
		       "Warning: IOMMU offset too big for device mask\n");
		if (tbl)
			printk(KERN_INFO
			       "mask: 0x%08lx, table offset: 0x%08lx\n",
			       mask, tbl->it_offset);
		else
			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
			       mask);
		return 0;
	} else
		return 1;
}

struct dma_mapping_ops dma_iommu_ops = {
	.alloc_coherent	= dma_iommu_alloc_coherent,
	.free_coherent	= dma_iommu_free_coherent,
	.map_single	= dma_iommu_map_single,
	.unmap_single	= dma_iommu_unmap_single,
	.map_sg		= dma_iommu_map_sg,
	.unmap_sg	= dma_iommu_unmap_sg,
	.dma_supported	= dma_iommu_dma_supported,
};
EXPORT_SYMBOL(dma_iommu_ops);

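/* Illustrative sketch (not part of this file): a platform that wants a
 * device to use the iommu path is expected to point the device at its
 * iommu table and at this ops structure, e.g.
 *
 *	dev->archdata.dma_data = tbl;             (per-bus iommu table)
 *	dev->archdata.dma_ops  = &dma_iommu_ops;
 *
 * after which the generic DMA API (dma_alloc_coherent(), dma_map_single(),
 * ...) dispatches to the callbacks above.  The archdata.dma_ops field is
 * an assumption of this example; only dma_data and numa_node are used in
 * this file.
 */
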
/*
 * Generic direct DMA implementation
 *
 * This implementation supports a global offset that can be applied if
 * the address at which memory is visible to devices is not 0.
 */

unsigned long dma_direct_offset;

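/* Illustrative sketch (not part of this file): a platform whose devices
 * see system memory starting at a non-zero bus address would set the
 * offset from its setup code before any mapping is made, e.g.
 *
 *	dma_direct_offset = 0x80000000ul;	(hypothetical bus offset)
 *
 * Every DMA address produced below is then the buffer's absolute address
 * with this offset or'ed in.
 */
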
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct page *page;
	void *ret;
	int node = dev->archdata.numa_node;

	/* TODO: Maybe use the numa node here too ? */
	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) | dma_direct_offset;

	return ret;
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

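/* With no iommu, the DMA address of a buffer is simply its absolute
 * (real) address with dma_direct_offset or'ed in; no state is kept,
 * so there is nothing to allocate or look up here.
 */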
static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction direction)
{
	return virt_to_abs(ptr) | dma_direct_offset;
}

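/* Direct mappings hold no resources, so unmapping is a no-op. */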
static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
}

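/* For scatter/gather, each entry's DMA address is taken straight from
 * the physical address of its page plus the offset within that page,
 * with dma_direct_offset or'ed in.
 */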
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
			dma_direct_offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* Could be improved to check for memory though it better be
	 * done via some global so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= DMA_32BIT_MASK;
}

struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_single	= dma_direct_map_single,
	.unmap_single	= dma_direct_unmap_single,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
};
EXPORT_SYMBOL(dma_direct_ops);