/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}
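
/* IOPTE protection templates.  IOPTE_CONSISTENT() builds a valid, cacheable
 * entry with the DMA context number encoded in the upper IOPTE bits;
 * IOPTE_STREAMING() additionally sets IOPTE_STBUF so the mapping is routed
 * through the streaming cache.
 */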
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}
/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space.  If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
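
/* Return a previously allocated range of IOMMU pages to the arena bitmap.
 * All callers in this file hold iommu->lock around the call.
 */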
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	bitmap_clear(arena->map, entry, npages);
}
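
/* Set up the software state for one IOMMU: the arena allocation bitmap,
 * the dummy page that inactive IOPTEs are pointed at, and the IOMMU page
 * table itself, all allocated from the given NUMA node.
 */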
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}
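
/* Allocate a run of IOMMU pages and return a pointer to the first IOPTE,
 * or NULL on failure.  Called with iommu->lock held.
 */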
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}
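
/* DMA context allocation.  Context zero means "no context"; otherwise a
 * context number is handed out from ctx_bitmap, searching upward from the
 * lowest context known to be free.
 */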
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
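
/* Coherent ("consistent") allocations are backed by ordinary pages and
 * mapped with IOPTE_CONSISTENT entries, so they do not go through the
 * streaming cache.
 */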
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
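
/* Map a single page for streaming DMA: allocate IOMMU pages covering the
 * buffer, grab a DMA context when the hardware supports context flushing,
 * and fill the IOPTEs with a streaming or consistent protection template.
 */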
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
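
/* Push dirty data for a mapping out of the streaming cache.  When both the
 * strbuf and the IOMMU support context flushing, flush by context and fall
 * back to per-page flushes on a context-flush timeout; otherwise flush page
 * by page.  Finally synchronize on the flush flag, unless the transfer was
 * DMA_TO_DEVICE and so could not have dirtied the streaming cache.
 */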
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
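
/* Tear down a single-page streaming mapping: flush the streaming cache if
 * it is enabled, point the IOPTEs back at the dummy page, then release the
 * IOMMU range and the DMA context.
 */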
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
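
/* Map a scatterlist.  IOMMU entries are allocated with a shared 'handle'
 * hint so the whole list is placed in one pass under the IOMMU lock, and
 * consecutive entries that come out contiguous in DMA space are merged
 * into larger segments as long as the device's max segment size and
 * segment boundary allow it.
 */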
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}
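
/* Undo dma_4u_map_sg(): walk the mapped segments, releasing each IOMMU
 * range, flushing the streaming cache where enabled and resetting the
 * IOPTEs to the dummy page, then free the shared DMA context.
 */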
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
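
/* The sync-for-cpu operations only have work to do when a streaming cache
 * is in use: they flush it so the CPU sees any data the device has written.
 */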
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_map_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
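
/* dma_supported()/dma_set_mask() for sparc64.  Masks wider than 32 bits
 * are rejected; otherwise the device mask must cover the IOMMU's
 * dma_addr_mask, with PCI devices getting a second chance via
 * pci64_dma_supported().
 */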
extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);