/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
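
/*
 * Worked example (illustrative only): assuming CONSISTENT_DMA_SIZE is
 * SZ_2M, CONSISTENT_BASE is 0xffc00000 and NUM_CONSISTENT_PTES is 1.
 * For x = 0xffc03000, CONSISTENT_OFFSET(x) = 3 (the fourth page of the
 * region) and CONSISTENT_PTE_INDEX(x) = 0 (the first 2MB page table).
 */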

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

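	/*
	 * First-fit search: the list is kept sorted by address, so we
	 * try the gap before each existing region in turn.  The
	 * (addr + size) < addr test catches wrap-around past the top
	 * of the address space.
	 */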
	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#x mask %#llx)\n", size, mask);
		goto no_page;
	}
	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		struct page *end = page + (1 << order);
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE - 1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);
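
		/*
		 * Map each page in turn, moving on to the next 2MB page
		 * table in consistent_pte[] whenever 'off' steps past
		 * the end of the current one.
		 */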
		do {
			BUG_ON(!pte_none(*pte));

			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	if (arch_is_coherent()) {
		void *virt;

		virt = kmalloc(size, gfp);
		if (!virt)
			return NULL;
		*handle = virt_to_dma(dev, virt);

		return virt;
	}

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
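
/*
 * Illustrative use (hypothetical driver code, not taken from this file):
 * a driver pairs dma_alloc_coherent() with dma_free_coherent() on the
 * same device, size and handle:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *	if (cpu) {
 *		... hand 'dma' to the device, access the buffer via 'cpu' ...
 *		dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
 *	}
 */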

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			vma->vm_flags |= VM_RESERVED;
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
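
/*
 * Illustrative use (hypothetical driver code): a driver's mmap file
 * operation can expose a previously allocated coherent buffer to
 * userspace; foo_dev, foo_cpu_addr, foo_dma_addr and foo_size are
 * assumed to have been recorded at allocation time:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_writecombine(foo_dev, vma, foo_cpu_addr,
 *					     foo_dma_addr, foo_size);
 *	}
 */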

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;
	int idx;
	u32 off;

	WARN_ON(irqs_disabled());

	if (arch_is_coherent()) {
		kfree(cpu_addr);
		return;
	}

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;
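
	/*
	 * Mark the region inactive while still holding the lock, so that
	 * concurrent lookups via vm_region_find() (e.g. from dma_mmap()
	 * or a double free) can no longer see it once the lock is dropped.
	 */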
	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE - 1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0, i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
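
/*
 * For example (illustrative): with CONSISTENT_DMA_SIZE = SZ_2M the loop
 * above runs exactly once, pre-allocating the single 2MB page table that
 * __dma_alloc() fills in later; core_initcall() runs this before driver
 * initcalls can ask for coherent memory.
 */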

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void consistent_sync(const void *start, size_t size, int direction)
{
	const void *end = start + size;

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		outer_inv_range(__pa(start), __pa(end));
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		outer_clean_range(__pa(start), __pa(end));
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		outer_flush_range(__pa(start), __pa(end));
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(consistent_sync);
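
/*
 * Illustrative driver-side usage (per the note above, drivers should use
 * the dma-mapping.h helpers rather than consistent_sync() directly):
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *	... CPU reads the freshly DMA'd data ...
 *	dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
 */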