/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows.  These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region.  This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA.  Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
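
/*
 * Illustrative usage (a sketch, not taken from this file): a platform
 * or bus driver whose device can only DMA within a limited window
 * would typically register it at probe time, e.g.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 * and tear it down again with dmabounce_unregister_dev(dev).  The
 * 512/4096 small/large pool sizes are purely illustrative; the
 * platform additionally provides dma_needs_bounce() to decide which
 * bus addresses must be bounced.
 */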
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
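
/*
 * When STATS is defined, the per-device counters kept below are
 * exported read-only through the dmabounce_stats sysfs attribute;
 * otherwise DO_STATS() compiles away to nothing.
 */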
/* ************************************************** */
struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;

#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;
};
#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}
/* determine if a buffer is from our "safe" pool */
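/*
 * Lookups take the list lock for reading only; alloc_safe_buffer() and
 * free_safe_buffer() take it for writing, so a buffer cannot be added
 * to or removed from the list underneath a concurrent lookup.
 */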
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}
/* ************************************************** */
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		if (dev)
			dev_err(dev, "Trying to %s invalid mapping\n", where);
		else
			pr_err("unknown device: Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS(device_info->map_op_count++);

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;
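
		/*
		 * (mask + 1) & ~mask gives the size of a contiguous DMA
		 * window described by the mask, e.g. mask 0x00ffffff
		 * yields limit 0x01000000 (16MB); with an all-ones mask
		 * the addition wraps to 0, which means "no limit" below.
		 */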
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}
/* ************************************************** */
/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));
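
	/*
	 * map_single() bounces through page_address(), so a highmem page
	 * has no permanent kernel mapping to copy from and cannot be
	 * bounced here.
	 */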
	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			     "is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page);
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");