/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *   Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *   -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	unsigned long order;
	int i;
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int array_size = nr_pages * sizeof(struct page *);
	struct page **pages;
	struct page *end;
	u64 mask = 0x00ffffff, limit;	/* ISA default */
	struct vm_struct *area;

	BUG_ON(!mem_init_done);

	size = PAGE_ALIGN(size);

	/* limit is the first address the device cannot reach (16MB here) */
	limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		printk(KERN_WARNING "coherent allocation too big (requested "
		       "%#x mask %#Lx)\n", size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	end = page + (1 << order);

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Split the high-order allocation into individual pages so that
	 * the unused tail can be handed back to the allocator below.
	 */
	split_page(page, order);

	/*
	 * Set the "dma handle"
	 */
	*handle = page_to_phys(page);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(1));
	if (!area)
		goto out_free_pages;

	if (array_size > PAGE_SIZE) {
		pages = vmalloc(array_size);
		/* VM_VPAGES records that the pages array itself is vmalloc()ed */
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc(array_size, GFP_KERNEL);
	}
	if (!pages)
		goto out_free_area;

	area->pages = pages;
	area->nr_pages = nr_pages;

	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
		goto out_unmap;

	/*
	 * Free the otherwise unused pages.
	 */
	page += nr_pages;
	while (page < end) {
		__free_page(page);
		page++;
	}

	return area->addr;

out_unmap:
	vunmap(area->addr);
	if (array_size > PAGE_SIZE)
		vfree(pages);
	else
		kfree(pages);
	goto out_free_pages;
out_free_area:
	free_vm_area(area);
out_free_pages:
	if (page)
		__free_pages(page, order);
no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);

/*
 * Free a mapping created by __dma_alloc_coherent() above; vfree() both
 * unmaps the uncached virtual area and releases the underlying pages.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	vfree(vaddr);
}
EXPORT_SYMBOL(__dma_free_coherent);
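
/*
 * Example (an illustrative sketch only, not compiled): drivers normally
 * reach these helpers through the generic DMA API rather than directly,
 * but the two pair up as below.  The buffer size and the 'ring'/'bus'
 * names are invented for the example.
 *
 *	dma_addr_t bus;
 *	void *ring = __dma_alloc_coherent(PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (ring) {
 *		... program the device with 'bus', access 'ring' from the
 *		... CPU without any explicit sync, then tear it down:
 *		__dma_free_coherent(PAGE_SIZE, ring);
 *	}
 */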

/*
 * Make an area of memory cache-consistent for the given DMA direction.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
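
/*
 * Example (an illustrative sketch): a streaming-mapping path would pick
 * the direction from the transfer it is about to start.  'buf' and
 * 'len' are invented names.
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	writeback before the
 *						device reads 'buf'
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	invalidate before the
 *						device writes 'buf'
 */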

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);

		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);

		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
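
/*
 * Worked example of the segmentation above (values invented): with 4K
 * pages, offset = 0x300 and size = 0x1200 give an initial seg_size of
 * min(0x1000 - 0x300, 0x1200) = 0xd00, hence nr_segs = 1 +
 * (0x1200 - 0xd00 + 0xfff) / 0x1000 = 2.  The first pass syncs 0xd00
 * bytes starting at offset 0x300 of the first page, the second the
 * remaining 0x500 bytes from the start of the second page.
 */
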
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page() makes memory consistent. Identical to __dma_sync(), but
 * takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
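
/*
 * Example (an illustrative sketch): a page-based mapping path might do
 *
 *	__dma_sync_page(page, offset, len, DMA_BIDIRECTIONAL);
 *
 * and the helper works whether or not 'page' lives in highmem.
 */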