/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

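/*
 * Find a run of 'npages' free entries in the table bitmap and mark them
 * allocated.  Allocations of more than 15 pages are taken from the region
 * above tbl->it_halfpoint, smaller ones from the region below it, so the
 * two kinds of traffic do not fragment each other.  Returns the index of
 * the first entry, or DMA_ERROR_CODE on failure.  Caller must hold
 * tbl->it_lock.
 */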
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

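/*
 * Allocate IOMMU entries for the 'npages' pages starting at 'page' and
 * program the hardware TCEs via ppc_md.tce_build().  Takes tbl->it_lock
 * internally.  Returns the DMA address of the first page, or
 * DMA_ERROR_CODE if either the range allocation or the TCE build fails.
 */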
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

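/*
 * Free 'npages' entries starting at 'dma_addr': clear the hardware TCEs
 * and the matching bits in the allocation bitmap.  Caller must hold
 * tbl->it_lock and is responsible for any required TCE cache flush.
 */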
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	iommu_area_free(tbl->it_map, free_entry, npages);
}

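/*
 * Locked wrapper around __iommu_free(): takes tbl->it_lock, releases the
 * range and flushes the TCE cache if the platform needs it.
 */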
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

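/*
 * Map a scatter/gather list.  Entries whose DMA ranges end up adjacent
 * are merged into one segment, unless "iommu=novmerge" was given or the
 * device's maximum segment size would be exceeded.  Returns the number
 * of DMA segments produced, or 0 on failure after undoing any partial
 * mappings.
 */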
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}

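/*
 * Undo iommu_map_sg(): walk the (possibly merged) DMA segments and free
 * their IOMMU entries.  The walk stops at the first segment with a zero
 * dma_length, which marks the end of the mapped list.
 */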
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

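/*
 * Set up the table's initial contents.  On a normal boot any TCEs left
 * behind by firmware are cleared.  In a kdump kernel the mappings made
 * by the first kernel are preserved in the bitmap instead; if fewer than
 * KDUMP_MIN_TCE_ENTRIES entries remain free, the last
 * KDUMP_MIN_TCE_ENTRIES entries are freed for the kdump boot.
 */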
static void iommu_table_clear(struct iommu_table *tbl)
{
	if (!is_kdump_kernel()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

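/*
 * Tear down a table set up by iommu_init_table(): warn if any TCEs are
 * still allocated, then free the bitmap and the iommu_table itself.
 */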
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page.  The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

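/*
 * Release a mapping made by iommu_map_page().  'size' must match the
 * size passed at map time so the same number of IOMMU pages is freed.
 */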
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

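/*
 * Counterpart of iommu_alloc_coherent(): tear down the IOMMU mapping and
 * give the buffer's pages back to the page allocator.
 */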
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}