/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
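
/*
 * Preallocate a fixed pool of dma-debug entries so the DMA API
 * debugging code (CONFIG_DMA_API_DEBUG) can track mappings from early
 * boot; fs_initcall() registers this before device initcalls run.
 */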
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);
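
/*
 * Allocate "coherent" memory: grab cached pages from the page
 * allocator, flush any stale lines out of the data cache, then hand
 * back an uncached ioremap() view of the same physical pages.
 * *dma_handle receives the physical address of the buffer.
 */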
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);

	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
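
/*
 * Tear down a dma_alloc_coherent() buffer: drop the uncached mapping
 * and return the underlying pages (already split to order 0 at
 * allocation time) to the page allocator one by one.
 */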
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);

	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	iounmap(vaddr);
}
EXPORT_SYMBOL(dma_free_coherent);
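
/*
 * A minimal usage sketch (illustrative only, assuming a driver that
 * holds a valid struct device *dev):
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... hand 'handle' to the device, access 'buf' from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */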
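
/*
 * Write back and/or invalidate the cache lines covering vaddr.  On
 * everything but SH-5 the flush primitives expect an address in the
 * cached, identity-mapped P1 segment, so translate first.
 */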
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
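
/*
 * "memchunk.NAME=SIZE" kernel command line support.  The stub below
 * only claims the "memchunk." prefix so the option is not flagged as
 * unknown; the actual parsing is done by memchunk_cmdline_override().
 * For example (device name purely illustrative), booting with
 * "memchunk.vpu=16m" would force a 16 MiB chunk for a device named
 * "vpu".
 */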
static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}
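
/*
 * Back the last (still empty) resource of a platform device with a
 * zeroed coherent DMA buffer.  The default size may be overridden on
 * the kernel command line via "memchunk.<name>="; a resulting size of
 * zero skips the allocation entirely.
 */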
int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			   name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}