/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)
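
/* The IOMMU and streaming buffer control registers are accessed by
 * physical address, so these accessors use ldxa/stxa through the
 * cache-bypassing ASI_PHYS_BYPASS_EC_E address space identifier.
 */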
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}
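
/* The IOPTE arena is a simple first-fit bitmap allocator.  The hardware
 * IOMMU TLB is only flushed lazily: __iommu_flushall() runs when the
 * search wraps around below, so translations for entries freed since the
 * last flush are not still cached when those entries are handed out again.
 */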
/* Based largely upon the ppc64 iommu allocator.  */
static long arena_alloc(struct iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			__iommu_flushall(iommu);
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
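
/* One-time setup of a sun4u IOMMU instance: initialize the lock and
 * context state, allocate the arena bitmap, the dummy page that backs
 * inactive IOPTEs, and the TSB (page table) itself, then point every
 * IOPTE at the dummy page.
 */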
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
	arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
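
/* DMA contexts tag groups of mappings so the streaming buffer can be
 * flushed per context rather than one page at a time.  Context 0 is
 * reserved to mean "no context" and is never allocated.
 */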
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
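
/* Consistent allocations are backed by physically contiguous pages and
 * mapped with IOPTE_CONSISTENT, so the streaming buffer is not involved;
 * requests needing an allocation order of 10 or more are refused.
 */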
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	free_npages(iommu, dvma - iommu->page_table_map_base, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
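
/* Map a single kernel buffer for streaming DMA: allocate IOPTEs covering
 * it, grab a flush context when the hardware supports context flushing,
 * and mark the pages IOPTE_STREAMING (or IOPTE_CONSISTENT when the
 * streaming buffer is disabled).  Write permission is granted unless the
 * transfer is DMA_TO_DEVICE only.
 */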
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
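
/* Force data for the given DMA range out of the streaming buffer, either
 * with a per-context flush (re-issuing the flush while the context match
 * register still shows outstanding entries) or with one page-flush write
 * per IO page.  Unless the mapping was DMA_TO_DEVICE only, the flush-sync
 * flag word is then armed and polled with a bounded spin until the
 * hardware signals completion.
 */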
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);

		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
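
/* Tear down a single mapping.  Note that only the IOPTEs are rewritten to
 * point at the dummy page here; invalidating the hardware IOMMU TLB is
 * left to the lazy flush performed by the arena allocator on wrap-around.
 */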
static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	(__pa(sg_virt((SG))))
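
/* fill_sg() writes the IOPTEs for "nused" coalesced DMA segments by
 * walking the "nelems" original scatterlist entries, emitting one IOPTE
 * per IO page and skipping over entries that are physically contiguous
 * with what has already been mapped.
 */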
static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
		    int nused, int nelems,
		    unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg = sg_next(sg);
				nelems--;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg = sg_next(sg);
			nelems--;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (nelems &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg = sg_next(sg);
				nelems--;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg = sg_next(dma_sg);
	}
}
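
/* Multi-entry scatterlists are coalesced by prepare_sg() before a single
 * contiguous IOPTE range is allocated for the whole list; the return
 * value is the number of coalesced DMA segments the caller should use.
 */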
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4u_map_single(dev, sg_virt(sglist),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(dev, sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (base == NULL)
		goto bad;

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp = sg_next(sgtmp);
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	return used;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}
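
/* The extent to release is computed from the first segment's page-aligned
 * DMA address up to the page-aligned end of the last segment that has a
 * nonzero dma_length.
 */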
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out the TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
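
/* The sync-for-cpu operations only need to push pending DMA writes out of
 * the streaming buffer so the CPU sees them; they are no-ops when the
 * streaming buffer is disabled.
 */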
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
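
/* dma_ops defaults to the sun4u implementation; platforms with a
 * different IOMMU (e.g. hypervisor-mediated sun4v) are expected to
 * repoint it during early boot.
 */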
const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
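
/* Device masks wider than 32 bits are rejected up front, since IOMMU bus
 * addresses here are 32-bit; otherwise the device mask must cover the
 * IOMMU's dma_addr_mask, with PCI devices deferring to the PCI layer.
 */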
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);
2005-04-17 02:20:36 +04:00
2007-07-28 09:39:14 +04:00
int dma_set_mask ( struct device * dev , u64 dma_mask )
{
# ifdef CONFIG_PCI
if ( dev - > bus = = & pci_bus_type )
return pci_set_dma_mask ( to_pci_dev ( dev ) , dma_mask ) ;
# endif
return - EINVAL ;
2005-04-17 02:20:36 +04:00
}
2007-07-28 09:39:14 +04:00
EXPORT_SYMBOL ( dma_set_mask ) ;