/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/export.h>
#include <asm/iommu.h>

/*
* Generic iommu implementation
*/

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      struct dma_attrs *attrs)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    struct dma_attrs *attrs)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
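
/*
 * Drivers never call the hooks in this file directly; they use the
 * generic DMA API, which dispatches through the dma_map_ops table at
 * the bottom of this file. Illustrative only:
 *
 *	void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, buf, handle);
 *
 * lands in dma_iommu_alloc_coherent()/dma_iommu_free_coherent() above.
 */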

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}
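
/*
 * Illustrative only: streaming mappings reach the two hooks above via
 * the generic API, e.g.
 *
 *	dma_addr_t addr = dma_map_page(dev, page, offset, size,
 *				       DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, addr, size, DMA_TO_DEVICE);
 */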

static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
			    device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
		       attrs);
}
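
/*
 * Illustrative only: a scatter/gather list set up with, e.g.,
 *
 *	int n = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 * ends up in dma_iommu_map_sg() above, which may coalesce adjacent
 * entries into fewer DMA segments; the n segments it returns are
 * later released with dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE).
 */
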
/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			", table unavailable\n", mask);
		return 0;
	}
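
	/*
	 * The DMA window may start above bus address zero: if the first
	 * page of the table is already beyond what the device's mask can
	 * address, no mapping this table hands out is reachable by the
	 * device.
	 */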
	if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
			 mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
		return 0;
	} else
		return 1;
}

static u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;

	if (!tbl)
		return 0;

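	/*
	 * Round the top of the DMA window (in IOMMU pages) up to a
	 * power of two and return a mask of all ones below it; e.g.
	 * it_offset + it_size = 0x5000 gives fls_long() = 15, so
	 * mask = 1ULL << 14 = 0x4000, and mask += mask - 1 = 0x7fff.
	 */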
	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}

struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
};
EXPORT_SYMBOL(dma_iommu_ops);
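
/*
 * A minimal sketch of how a platform attaches these ops to a device;
 * "pdev" and "tbl" (an already-initialized iommu_table) are hypothetical:
 *
 *	set_iommu_table_base(&pdev->dev, tbl);
 *	set_dma_ops(&pdev->dev, &dma_iommu_ops);
 *
 * After this, the generic dma_* calls on that device route through the
 * callbacks in this file.
 */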