/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END		(0xffe00000)
#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES	(CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
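
/*
 * Worked example (illustration only, assuming the usual CONSISTENT_DMA_SIZE
 * default of 2MiB and ARM's 21-bit PGDIR_SHIFT): CONSISTENT_BASE is then
 * 0xffc00000 and NUM_CONSISTENT_PTES is 1 (one 2MB section's worth of PTEs).
 * An address such as 0xffc01000 gives CONSISTENT_OFFSET() == 1 (second page
 * of the region) and CONSISTENT_PTE_INDEX() == 0 (first PTE table).
 */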

static u64 get_coherent_dma_mask(struct device *dev)
{
        u64 mask = ISA_DMA_THRESHOLD;

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        return 0;
                }

                if ((~mask) & ISA_DMA_THRESHOLD) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (unsigned long long)ISA_DMA_THRESHOLD);
                        return 0;
                }
        }

        return mask;
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
        unsigned long order = get_order(size);
        struct page *page, *p, *e;
        void *ptr;
        u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
        u64 limit = (mask + 1) & ~mask;
        if (limit && size >= limit) {
                dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
                        size, mask);
                return NULL;
        }
#endif

        if (!mask)
                return NULL;

        if (mask < 0xffffffffULL)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * Now split the huge page and free the excess pages
         */
        split_page(page, order);
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                __free_page(p);

        /*
         * Ensure that the allocated pages are zeroed, and that any data
         * lurking in the kernel direct-mapped region is invalidated.
         */
        ptr = page_address(page);
        memset(ptr, 0, size);
        dmac_flush_range(ptr, ptr + size);
        outer_flush_range(__pa(ptr), __pa(ptr) + size);

        return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
        struct page *e = page + (size >> PAGE_SHIFT);

        while (page < e) {
                __free_page(page);
                page++;
        }
}

#ifdef CONFIG_MMU
/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
        .vm_lock        = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
{
        struct page *page;
        struct arm_vmregion *c;

        if (!consistent_pte[0]) {
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }

        size = PAGE_ALIGN(size);

        page = __dma_alloc_buffer(dev, size, gfp);
        if (!page)
                goto no_page;

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = arm_vmregion_alloc(&consistent_head, size,
                               gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                pte_t *pte;
                int idx = CONSISTENT_PTE_INDEX(c->vm_start);
                u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE - 1);

                pte = consistent_pte[idx] + off;
                c->vm_pages = page;

                /*
                 * Set the "dma handle"
                 */
                *handle = page_to_dma(dev, page);

                do {
                        BUG_ON(!pte_none(*pte));

                        /*
                         * x86 does not mark the pages reserved...
                         */
                        SetPageReserved(page);
                        set_pte_ext(pte, mk_pte(page, prot), 0);
                        page++;
                        pte++;
                        off++;
                        if (off >= PTRS_PER_PTE) {
                                off = 0;
                                pte = consistent_pte[++idx];
                        }
                } while (size -= PAGE_SIZE);

                return (void *)c->vm_start;
        }

        if (page)
                __dma_free_buffer(page, size);
 no_page:
        *handle = ~0;
        return NULL;
}
#else	/* !CONFIG_MMU */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
{
        void *virt;
        u64 mask = get_coherent_dma_mask(dev);

        if (!mask)
                goto error;

        if (mask < 0xffffffffULL)
                gfp |= GFP_DMA;
        virt = kmalloc(size, gfp);
        if (!virt)
                goto error;

        *handle = virt_to_dma(dev, virt);
        return virt;

error:
        *handle = ~0;
        return NULL;
}
#endif	/* CONFIG_MMU */

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        void *memory;

        if (dma_alloc_from_coherent(dev, size, handle, &memory))
                return memory;

        if (arch_is_coherent()) {
                struct page *page;

                page = __dma_alloc_buffer(dev, PAGE_ALIGN(size), gfp);
                if (!page) {
                        *handle = ~0;
                        return NULL;
                }

                *handle = page_to_dma(dev, page);
                return page_address(page);
        }

        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
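
/*
 * Illustrative usage (not part of the original file): a driver would
 * typically allocate and later release a coherent buffer as below, where
 * 'dev' and BUF_SIZE stand for the driver's own device pointer and size:
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, BUF_SIZE, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... hand dma_handle to the device, access the buffer via cpu_addr ...
 *	dma_free_coherent(dev, BUF_SIZE, cpu_addr, dma_handle);
 */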

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret = -ENXIO;
#ifdef CONFIG_MMU
        unsigned long user_size, kern_size;
        struct arm_vmregion *c;

        user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

        c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
        if (c) {
                unsigned long off = vma->vm_pgoff;

                kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

                if (off < kern_size &&
                    user_size <= (kern_size - off)) {
                        ret = remap_pfn_range(vma, vma->vm_start,
                                              page_to_pfn(c->vm_pages) + off,
                                              user_size << PAGE_SHIFT,
                                              vma->vm_page_prot);
                }
        }
#endif	/* CONFIG_MMU */

        return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
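
/*
 * Illustrative usage (not part of the original file): a driver that wants
 * to hand a buffer obtained from dma_alloc_writecombine() to userspace
 * might call dma_mmap_writecombine() from its own mmap file operation;
 * 'foo_dev', 'buf' and 'buf_dma' are hypothetical driver-private names:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_writecombine(foo_dev, vma, buf, buf_dma,
 *					     vma->vm_end - vma->vm_start);
 *	}
 *
 * dma_mmap_coherent() is used the same way for uncached buffers from
 * dma_alloc_coherent().
 */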

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
#ifdef CONFIG_MMU
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
        struct arm_vmregion *c;
        unsigned long addr;
        pte_t *ptep;
        int idx;
        u32 off;

        WARN_ON(irqs_disabled());

        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;

        size = PAGE_ALIGN(size);

        if (arch_is_coherent()) {
                __dma_free_buffer(dma_to_page(dev, handle), size);
                return;
        }

        c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
        if (!c)
                goto no_area;

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

        idx = CONSISTENT_PTE_INDEX(c->vm_start);
        off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE - 1);
        ptep = consistent_pte[idx] + off;
        addr = c->vm_start;
        do {
                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
                unsigned long pfn;

                ptep++;
                addr += PAGE_SIZE;
                off++;
                if (off >= PTRS_PER_PTE) {
                        off = 0;
                        ptep = consistent_pte[++idx];
                }

                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);

                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);

                                /*
                                 * x86 does not mark the pages reserved...
                                 */
                                ClearPageReserved(page);
                                continue;
                        }
                }
                printk(KERN_CRIT "%s: bad page in kernel page table\n",
                       __func__);
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        arm_vmregion_free(&consistent_head, c);

        __dma_free_buffer(dma_to_page(dev, handle), size);
        return;

 no_area:
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, cpu_addr);
        dump_stack();
}
#else	/* !CONFIG_MMU */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;
        kfree(cpu_addr);
}
#endif	/* CONFIG_MMU */
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
        int ret = 0;
#ifdef CONFIG_MMU
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i = 0;
        u32 base = CONSISTENT_BASE;

        do {
                pgd = pgd_offset(&init_mm, base);
                pmd = pmd_alloc(&init_mm, pgd, base);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
                WARN_ON(!pmd_none(*pmd));

                pte = pte_alloc_kernel(pmd, base);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }

                consistent_pte[i++] = pte;
                base += (1 << PGDIR_SHIFT);
        } while (base < CONSISTENT_END);
#endif	/* !CONFIG_MMU */

        return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void dma_cache_maint(const void *start, size_t size, int direction)
{
        void (*inner_op)(const void *, const void *);
        void (*outer_op)(unsigned long, unsigned long);

        BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));

        switch (direction) {
        case DMA_FROM_DEVICE:		/* invalidate only */
                inner_op = dmac_inv_range;
                outer_op = outer_inv_range;
                break;
        case DMA_TO_DEVICE:		/* writeback only */
                inner_op = dmac_clean_range;
                outer_op = outer_clean_range;
                break;
        case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
                inner_op = dmac_flush_range;
                outer_op = outer_flush_range;
                break;
        default:
                BUG();
        }

        inner_op(start, start + size);
        outer_op(__pa(start), __pa(start) + size);
}
EXPORT_SYMBOL(dma_cache_maint);

static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
                                       size_t size, int direction)
{
        void *vaddr;
        unsigned long paddr;
        void (*inner_op)(const void *, const void *);
        void (*outer_op)(unsigned long, unsigned long);

        switch (direction) {
        case DMA_FROM_DEVICE:		/* invalidate only */
                inner_op = dmac_inv_range;
                outer_op = outer_inv_range;
                break;
        case DMA_TO_DEVICE:		/* writeback only */
                inner_op = dmac_clean_range;
                outer_op = outer_clean_range;
                break;
        case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
                inner_op = dmac_flush_range;
                outer_op = outer_flush_range;
                break;
        default:
                BUG();
        }

        if (!PageHighMem(page)) {
                vaddr = page_address(page) + offset;
                inner_op(vaddr, vaddr + size);
        } else {
                vaddr = kmap_high_get(page);
                if (vaddr) {
                        vaddr += offset;
                        inner_op(vaddr, vaddr + size);
                        kunmap_high(page);
                }
        }

        paddr = page_to_phys(page) + offset;
        outer_op(paddr, paddr + size);
}

void dma_cache_maint_page(struct page *page, unsigned long offset,
                          size_t size, int dir)
{
        /*
         * A single sg entry may refer to multiple physically contiguous
         * pages.  But we still need to process highmem pages individually.
         * If highmem is not configured then the bulk of this loop gets
         * optimized out.
         */
        size_t left = size;
        do {
                size_t len = left;
                if (PageHighMem(page) && len + offset > PAGE_SIZE) {
                        if (offset >= PAGE_SIZE) {
                                page += offset / PAGE_SIZE;
                                offset %= PAGE_SIZE;
                        }
                        len = PAGE_SIZE - offset;
                }
                dma_cache_maint_contiguous(page, offset, len, dir);
                offset = 0;
                page++;
                left -= len;
        } while (left);
}
EXPORT_SYMBOL(dma_cache_maint_page);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i, j;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                              s->length, dir);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
        return nents;

 bad_mapping:
        for_each_sg(sg, s, i, j)
                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
        return 0;
}
EXPORT_SYMBOL(dma_map_sg);
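
/*
 * Illustrative usage (not part of the original file): a driver maps a
 * scatterlist before starting a transfer and unmaps it once the transfer
 * has completed; 'dev', 'sglist' and 'nents' are the driver's own:
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	... program the device with sg_dma_address()/sg_dma_len() for each
 *	    of the 'count' mapped entries, then wait for completion ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */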

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
                                       sg_dma_len(s), dir);
        }
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
                                               sg_dma_len(s), dir))
                        continue;

                if (!arch_is_coherent())
                        dma_cache_maint_page(sg_page(s), s->offset,
                                             s->length, dir);
        }
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
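
/*
 * Illustrative usage (not part of the original file): when the CPU must
 * inspect a still-mapped scatterlist between transfers, ownership is passed
 * back and forth with the sync calls; 'dev', 'sglist' and 'nents' are the
 * driver's own:
 *
 *	dma_sync_sg_for_cpu(dev, sglist, nents, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 *	dma_sync_sg_for_device(dev, sglist, nents, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffers again ...
 */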