/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
        SYNC_FOR_CPU = 0,
        SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer.  This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);
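
/*
 * Parse the "swiotlb=" kernel command line parameter.  As implemented
 * below, the accepted form is "swiotlb=<nslabs>[,force]", e.g.
 * "swiotlb=65536" or "swiotlb=65536,force" to push every streaming
 * mapping through the IO TLB regardless of the device's DMA mask.
 */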
static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                io_tlb_nslabs = simple_strtoul(str, &str, 0);
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
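
/*
 * The following helpers are declared __weak so that architectures can
 * override them, e.g. to allocate the bounce pool from a special region
 * or to apply a non-identity physical <-> bus address translation.
 */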
void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
        return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
        return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
        return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
{
        return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
{
        return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

static void *swiotlb_bus_to_virt(dma_addr_t address)
{
        return phys_to_virt(swiotlb_bus_to_phys(address));
}
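
/*
 * These two __weak hooks decide whether a bus address or a physical range
 * must be bounced.  The defaults only consult the device's DMA mask and
 * never force a bounce for a range; architectures may override them.
 */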
int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
                                              dma_addr_t addr, size_t size)
{
        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
        return 0;
}

static void swiotlb_print_info(unsigned long bytes)
{
        phys_addr_t pstart, pend;

        pstart = virt_to_phys(io_tlb_start);
        pend = virt_to_phys(io_tlb_end);

        printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
               bytes >> 20, io_tlb_start, io_tlb_end);
        printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
               (unsigned long long)pstart,
               (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
        unsigned long i, bytes;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        /*
         * Get IO TLB memory from the low pages
         */
        io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + bytes;

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");

        swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
        swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
        unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
        unsigned int order;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        /*
         * Get IO TLB memory from the low pages
         */
        order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
        io_tlb_nslabs = SLABS_PER_PAGE << order;
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
                if (io_tlb_start)
                        break;
                order--;
        }

        if (!io_tlb_start)
                goto cleanup1;

        if (order != get_order(bytes)) {
                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                io_tlb_nslabs = SLABS_PER_PAGE << order;
                bytes = io_tlb_nslabs << IO_TLB_SHIFT;
        }
        io_tlb_end = io_tlb_start + bytes;
        memset(io_tlb_start, 0, bytes);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
                                      get_order(io_tlb_nslabs * sizeof(int)));
        if (!io_tlb_list)
                goto cleanup2;

        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;

        io_tlb_orig_addr = (phys_addr_t *)
                __get_free_pages(GFP_KERNEL,
                                 get_order(io_tlb_nslabs *
                                           sizeof(phys_addr_t)));
        if (!io_tlb_orig_addr)
                goto cleanup3;

        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
                                          get_order(io_tlb_overflow));
        if (!io_tlb_overflow_buffer)
                goto cleanup4;

        swiotlb_print_info(bytes);

        return 0;

cleanup4:
        free_pages((unsigned long)io_tlb_orig_addr,
                   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
        io_tlb_orig_addr = NULL;
cleanup3:
        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
                                                         sizeof(int)));
        io_tlb_list = NULL;
cleanup2:
        io_tlb_end = NULL;
        free_pages((unsigned long)io_tlb_start, order);
        io_tlb_start = NULL;
cleanup1:
        io_tlb_nslabs = req_nslabs;
        return -ENOMEM;
}

static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
        return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
}
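
/*
 * range_needs_mapping() additionally honours swiotlb_force, which makes
 * every streaming mapping go through the bounce pool (see "swiotlb=...,force").
 */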
static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
{
        return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
}

static int is_swiotlb_buffer(char *addr)
{
        return addr >= io_tlb_start && addr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                           enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(phys);

        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = phys & ~PAGE_MASK;
                char *buffer;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn),
                                             KM_BOUNCE_READ);
                        if (dir == DMA_TO_DEVICE)
                                memcpy(dma_addr, buffer + offset, sz);
                        else
                                memcpy(buffer + offset, dma_addr, sz);
                        kunmap_atomic(buffer, KM_BOUNCE_READ);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        dma_addr += sz;
                        offset = 0;
                }
        } else {
                if (dir == DMA_TO_DEVICE)
                        memcpy(dma_addr, phys_to_virt(phys), size);
                else
                        memcpy(phys_to_virt(phys), dma_addr, size);
        }
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
        unsigned long flags;
        char *dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;
        unsigned long start_dma_addr;
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;
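
        /*
         * The slots we hand out must not cross the device's DMA segment
         * boundary: iommu_is_span_boundary() below skips any candidate
         * range that would straddle a (dma_get_seg_boundary(hwdev) + 1)
         * aligned boundary.
         */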
        mask = dma_get_seg_boundary(hwdev);
        start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

        /*
         * Carefully handle integer overflow which can occur when mask == ~0UL.
         */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        BUG_ON(!nslots);

        /*
         * Find suitable number of IO TLB entries size that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        index = ALIGN(io_tlb_index, stride);
        if (index >= io_tlb_nslabs)
                index = 0;
        wrap = index;

        do {
                while (iommu_is_span_boundary(index, nslots, offset_slots,
                                              max_slots)) {
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                        if (index == wrap)
                                goto not_found;
                }

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (io_tlb_list[index] >= nslots) {
                        int count = 0;

                        for (i = index; i < (int) (index + nslots); i++)
                                io_tlb_list[i] = 0;
                        for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
                        dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                        /*
                         * Update the indices to avoid searching in the next
                         * round.
                         */
                        io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                        ? (index + nslots) : 0);

                        goto found;
                }
                index += stride;
                if (index >= io_tlb_nslabs)
                        index = 0;
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        return NULL;
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index + i] = phys + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

        return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t phys = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                         io_tlb_list[index + nslots] : 0);

                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with the succeeding slots
                 */
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
                 */
                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}
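
/*
 * Copy data between the bounce buffer and the original buffer for the
 * requested sync target: towards the CPU after the device has written
 * the buffer, or towards the device before it reads it.
 */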
static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
            int dir, int target)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t phys = io_tlb_orig_addr[index];

        phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
        default:
                BUG();
        }
}
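
/*
 * Allocate coherent memory for the device: try a plain page allocation
 * first, fall back to a bounce-pool slot if the pages are not addressable
 * by the device, and fail if even the bounce address is out of reach.
 */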
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        dma_addr_t dev_addr;
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        ret = (void *)__get_free_pages(flags, order);
        if (ret &&
            !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
                                   size)) {
                /*
                 * The allocated memory isn't reachable by the device.
                 */
                free_pages((unsigned long) ret, order);
                ret = NULL;
        }
        if (!ret) {
                /*
                 * We are either out of memory or the device can't DMA
                 * to GFP_DMA memory; fall back on map_single(), which
                 * will grab memory from the lowest available address range.
                 */
                ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
                if (!ret)
                        return NULL;
        }

        memset(ret, 0, size);
        dev_addr = swiotlb_virt_to_bus(hwdev, ret);

        /* Confirm address can be DMA'd by device */
        if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
                printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                       (unsigned long long)dma_mask,
                       (unsigned long long)dev_addr);

                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                return NULL;
        }
        *dma_handle = dev_addr;
        return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                      dma_addr_t dma_handle)
{
        WARN_ON(irqs_disabled());
        if (!is_swiotlb_buffer(vaddr))
                free_pages((unsigned long) vaddr, get_order(size));
        else
                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation.  This is very bad.
         * Unfortunately the drivers cannot handle this operation properly
         * unless they check for dma_mapping_error() (most don't).
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
               "device %s\n", size, dev ? dev_name(dev) : "?");

        if (size > io_tlb_overflow && do_panic) {
                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                        panic("DMA: Memory would be corrupted\n");
                if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                        panic("DMA: Random memory would be DMAed\n");
        }
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
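
/*
 * Note: when swiotlb provides the dma_ops backend, drivers normally reach
 * this function through the generic dma_map_page()/dma_map_single()
 * wrappers rather than by calling it directly.
 */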
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            unsigned long offset, size_t size,
                            enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
        void *map;

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (!address_needs_mapping(dev, dev_addr, size) &&
            !range_needs_mapping(phys, size))
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = map_single(dev, phys, size, dir);
        if (!map) {
                swiotlb_full(dev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = swiotlb_virt_to_bus(dev, map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (address_needs_mapping(dev, dev_addr, size))
                panic("map_single: bounce buffer is not DMA'ble");

        return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        struct dma_attrs *attrs)
{
        char *dma_addr = swiotlb_bus_to_virt(dev_addr);

        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))
                unmap_single(hwdev, dma_addr, size, dir);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
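
/*
 * Helper shared by the swiotlb_sync_single_for_{cpu,device} entry points
 * below; 'target' selects the sync direction.
 */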
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
{
        char *dma_addr = swiotlb_bus_to_virt(dev_addr);

        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))
                sync_single(hwdev, dma_addr, size, dir, target);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
                          unsigned long offset, size_t size,
                          int dir, int target)
{
        char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;

        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))
                sync_single(hwdev, dma_addr, size, dir, target);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir)
{
        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
                                  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir)
{
        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
                                  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

                if (range_needs_mapping(paddr, sg->length) ||
                    address_needs_mapping(hwdev, dev_addr, sg->length)) {
                        void *map = map_single(hwdev, sg_phys(sg),
                                               sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                       attrs);
                                sgl[0].dma_length = 0;
                                return 0;
                        }
                        sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
               int dir)
{
        return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
                        unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
                                     sg->dma_length, dir);
                else if (dir == DMA_FROM_DEVICE)
                        dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
        }
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                 int dir)
{
        return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                int nelems, int dir, int target)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
                        sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
                                    sg->dma_length, dir, target);
                else if (dir == DMA_FROM_DEVICE)
                        dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
        }
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                        int nelems, enum dma_data_direction dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, enum dma_data_direction dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
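
/*
 * A mapping has failed if it points at the overflow buffer, which is what
 * swiotlb_map_page() returns when no bounce slot could be allocated.
 */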
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);