2009-01-09 14:19:52 +03:00
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
2009-01-09 16:19:54 +03:00
# include <linux/scatterlist.h>
2009-01-09 16:10:26 +03:00
# include <linux/dma-mapping.h>
2009-01-19 18:52:39 +03:00
# include <linux/stacktrace.h>
2009-01-09 14:19:52 +03:00
# include <linux/dma-debug.h>
2009-01-09 14:34:49 +03:00
# include <linux/spinlock.h>
2009-01-09 15:13:27 +03:00
# include <linux/debugfs.h>
2009-05-22 23:23:13 +04:00
# include <linux/uaccess.h>
2009-01-09 16:10:26 +03:00
# include <linux/device.h>
2009-01-09 14:19:52 +03:00
# include <linux/types.h>
2009-01-09 16:10:26 +03:00
# include <linux/sched.h>
2009-05-22 23:23:13 +04:00
# include <linux/ctype.h>
2009-01-09 14:19:52 +03:00
# include <linux/list.h>
2009-01-09 14:54:42 +03:00
# include <linux/slab.h>
2009-01-09 14:19:52 +03:00
2009-03-16 18:51:55 +03:00
# include <asm/sections.h>
2009-01-09 14:34:49 +03:00
# define HASH_SIZE 1024ULL
# define HASH_FN_SHIFT 13
# define HASH_FN_MASK (HASH_SIZE - 1)
2009-01-09 14:19:52 +03:00
/* The kind of DMA-API call a dma_debug_entry was created by. */
enum {
	dma_debug_single,	/* dma_map_single() */
	dma_debug_page,		/* dma_map_page() */
	dma_debug_sg,		/* dma_map_sg() */
	dma_debug_coherent,	/* dma_alloc_coherent() */
};
2009-01-19 18:52:39 +03:00
# define DMA_DEBUG_STACKTRACE_ENTRIES 5
2009-01-09 14:19:52 +03:00
/*
 * One entry per active DMA mapping.  Holds everything needed to match
 * an unmap/sync call against the original map call and to report
 * meaningful errors.
 */
struct dma_debug_entry {
	struct list_head list;		/* chaining inside a hash bucket */
	struct device    *dev;		/* device the mapping belongs to */
	int              type;		/* one of the dma_debug_* types */
	phys_addr_t      paddr;		/* CPU physical address */
	u64              dev_addr;	/* device (bus) address */
	u64              size;		/* mapping size in bytes */
	int              direction;	/* DMA_TO_DEVICE, DMA_FROM_DEVICE, ... */
	int              sg_call_ents;	/* nents passed to dma_map_sg() */
	int              sg_mapped_ents;/* entries actually mapped */
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;	/* where the mapping was created */
	unsigned long	 st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};
2009-01-09 14:34:49 +03:00
/* One hash bucket: a list of dma_debug_entry's protected by a lock. */
struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;
2009-01-09 14:34:49 +03:00
/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash [ HASH_SIZE ] ;
2009-01-09 14:42:46 +03:00
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD ( free_entries ) ;
/* Lock for the list above */
static DEFINE_SPINLOCK ( free_entries_lock ) ;
/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly ;
2009-01-09 15:13:27 +03:00
/* Global error count */
static u32 error_count ;
/* Global error show enable*/
static u32 show_all_errors __read_mostly ;
/* Number of errors to show */
static u32 show_num_errors = 1 ;
2009-01-09 14:42:46 +03:00
static u32 num_free_entries ;
static u32 min_free_entries ;
2009-04-15 13:22:41 +04:00
static u32 nr_total_entries ;
2009-01-09 14:34:49 +03:00
2009-01-09 15:01:56 +03:00
/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries ;
2009-01-09 15:13:27 +03:00
/* debugfs dentry's for the stuff above */
static struct dentry * dma_debug_dent __read_mostly ;
static struct dentry * global_disable_dent __read_mostly ;
static struct dentry * error_count_dent __read_mostly ;
static struct dentry * show_all_errors_dent __read_mostly ;
static struct dentry * show_num_errors_dent __read_mostly ;
static struct dentry * num_free_entries_dent __read_mostly ;
static struct dentry * min_free_entries_dent __read_mostly ;
2009-05-22 23:23:13 +04:00
static struct dentry * filter_dent __read_mostly ;
2009-01-09 15:13:27 +03:00
2009-05-22 20:24:20 +04:00
/* per-driver filter related state */
# define NAME_MAX_LEN 64
static char current_driver_name [ NAME_MAX_LEN ] __read_mostly ;
static struct device_driver * current_driver __read_mostly ;
static DEFINE_RWLOCK ( driver_name_lock ) ;
2009-01-09 15:13:27 +03:00
2009-01-09 16:10:26 +03:00
static const char * type2name [ 4 ] = { " single " , " page " ,
" scather-gather " , " coherent " } ;
static const char * dir2name [ 4 ] = { " DMA_BIDIRECTIONAL " , " DMA_TO_DEVICE " ,
" DMA_FROM_DEVICE " , " DMA_NONE " } ;
2009-05-22 19:16:04 +04:00
/* little merge helper - remove it after the merge window */
# ifndef BUS_NOTIFY_UNBOUND_DRIVER
# define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
# endif
2009-01-09 16:10:26 +03:00
/*
* The access to some variables in this macro is racy . We can ' t use atomic_t
* here because all these variables are exported to debugfs . Some of them even
* writeable . This is also the reason why a lock won ' t help much . But anyway ,
* the races are no big deal . Here is why :
*
* error_count : the addition is racy , but the worst thing that can happen is
* that we don ' t count some errors
* show_num_errors : the subtraction is racy . Also no big deal because in
* worst case this will result in one warning more in the
* system log than the user configured . This variable is
* writeable via debugfs .
*/
2009-01-19 18:52:39 +03:00
/*
 * Print the stack trace that was saved when @entry was mapped.
 * No-op when CONFIG_STACKTRACE is off or @entry is NULL.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (!entry)
		return;

	pr_warning("Mapped at:\n");
	print_stack_trace(&entry->stacktrace, 0);
#endif
}
2009-05-22 20:24:20 +04:00
static bool driver_filter ( struct device * dev )
{
2009-06-08 17:53:46 +04:00
struct device_driver * drv ;
unsigned long flags ;
bool ret ;
2009-05-22 20:24:20 +04:00
/* driver filter off */
if ( likely ( ! current_driver_name [ 0 ] ) )
return true ;
/* driver filter on and initialized */
2009-08-20 05:17:08 +04:00
if ( current_driver & & dev & & dev - > driver = = current_driver )
2009-05-22 20:24:20 +04:00
return true ;
2009-08-20 05:17:08 +04:00
/* driver filter on, but we can't filter on a NULL device... */
if ( ! dev )
return false ;
2009-06-08 17:53:46 +04:00
if ( current_driver | | ! current_driver_name [ 0 ] )
return false ;
2009-05-22 20:24:20 +04:00
2009-06-08 17:53:46 +04:00
/* driver filter on but not yet initialized */
drv = get_driver ( dev - > driver ) ;
if ( ! drv )
return false ;
/* lock to protect against change of current_driver_name */
read_lock_irqsave ( & driver_name_lock , flags ) ;
ret = false ;
if ( drv - > name & &
strncmp ( current_driver_name , drv - > name , NAME_MAX_LEN - 1 ) = = 0 ) {
current_driver = drv ;
ret = true ;
2009-05-22 20:24:20 +04:00
}
2009-06-08 17:53:46 +04:00
read_unlock_irqrestore ( & driver_name_lock , flags ) ;
put_driver ( drv ) ;
return ret ;
2009-05-22 20:24:20 +04:00
}
2009-08-20 05:17:08 +04:00
/*
 * Report a DMA-API usage error for @dev, optionally dumping the mapping
 * stack trace of @entry, subject to the driver filter and the
 * show_all_errors/show_num_errors debugfs knobs.
 *
 * The counter updates here are racy by design: the counters are
 * exported (partly writeable) via debugfs, so a lock would not help.
 * Worst case we miss counting an error or print one extra warning.
 *
 * Note: no semicolon after "while (0)" — the stray one in the previous
 * version broke the do-while(0) idiom and would miscompile in an
 * if/else without braces.
 */
#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, "%s %s: " format,			\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0)
2009-01-09 14:34:49 +03:00
/*
* Hash related functions
*
* Every DMA - API request is saved into a struct dma_debug_entry . To
* have quick access to these structs they are stored into a hash .
*/
static int hash_fn ( struct dma_debug_entry * entry )
{
/*
* Hash function is based on the dma address .
* We use bits 20 - 27 here as the index into the hash
*/
return ( entry - > dev_addr > > HASH_FN_SHIFT ) & HASH_FN_MASK ;
}
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 * The acquired irq state is handed back through @flags for the matching
 * put_hash_bucket() call.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}
/*
* Give up exclusive access to the hash bucket
*/
static void put_hash_bucket ( struct hash_bucket * bucket ,
unsigned long * flags )
{
unsigned long __flags = * flags ;
spin_unlock_irqrestore ( & bucket - > lock , __flags ) ;
}
/*
* Search a given entry in the hash bucket list
*/
static struct dma_debug_entry * hash_bucket_find ( struct hash_bucket * bucket ,
struct dma_debug_entry * ref )
{
2009-06-05 14:01:35 +04:00
struct dma_debug_entry * entry , * ret = NULL ;
int matches = 0 , match_lvl , last_lvl = 0 ;
2009-01-09 14:34:49 +03:00
list_for_each_entry ( entry , & bucket - > list , list ) {
2009-06-05 14:01:35 +04:00
if ( ( entry - > dev_addr ! = ref - > dev_addr ) | |
( entry - > dev ! = ref - > dev ) )
continue ;
/*
* Some drivers map the same physical address multiple
* times . Without a hardware IOMMU this results in the
* same device addresses being put into the dma - debug
* hash multiple times too . This can result in false
tree-wide: fix assorted typos all over the place
That is "success", "unknown", "through", "performance", "[re|un]mapping"
, "access", "default", "reasonable", "[con]currently", "temperature"
, "channel", "[un]used", "application", "example","hierarchy", "therefore"
, "[over|under]flow", "contiguous", "threshold", "enough" and others.
Signed-off-by: André Goddard Rosa <andre.goddard@gmail.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
2009-11-14 18:09:05 +03:00
* positives being reported . Therefore we implement a
2009-06-05 14:01:35 +04:00
* best - fit algorithm here which returns the entry from
* the hash which fits best to the reference value
* instead of the first - fit .
*/
matches + = 1 ;
match_lvl = 0 ;
2009-06-11 12:03:42 +04:00
entry - > size = = ref - > size ? + + match_lvl : 0 ;
entry - > type = = ref - > type ? + + match_lvl : 0 ;
entry - > direction = = ref - > direction ? + + match_lvl : 0 ;
entry - > sg_call_ents = = ref - > sg_call_ents ? + + match_lvl : 0 ;
2009-06-05 14:01:35 +04:00
2009-06-11 12:03:42 +04:00
if ( match_lvl = = 4 ) {
2009-06-05 14:01:35 +04:00
/* perfect-fit - return the result */
2009-01-09 14:34:49 +03:00
return entry ;
2009-06-05 14:01:35 +04:00
} else if ( match_lvl > last_lvl ) {
/*
* We found an entry that fits better then the
* previous one
*/
last_lvl = match_lvl ;
ret = entry ;
}
2009-01-09 14:34:49 +03:00
}
2009-06-05 14:01:35 +04:00
/*
* If we have multiple matches but no perfect - fit , just return
* NULL .
*/
ret = ( matches = = 1 ) ? ret : NULL ;
return ret ;
2009-01-09 14:34:49 +03:00
}
/*
* Add an entry to a hash bucket
*/
static void hash_bucket_add ( struct hash_bucket * bucket ,
struct dma_debug_entry * entry )
{
list_add_tail ( & entry - > list , & bucket - > list ) ;
}
/*
* Remove entry from a hash bucket list
*/
static void hash_bucket_del ( struct dma_debug_entry * entry )
{
list_del ( & entry - > list ) ;
}
2009-02-12 18:19:13 +03:00
/*
* Dump mapping entries for debugging purposes
*/
void debug_dma_dump_mappings ( struct device * dev )
{
int idx ;
for ( idx = 0 ; idx < HASH_SIZE ; idx + + ) {
struct hash_bucket * bucket = & dma_entry_hash [ idx ] ;
struct dma_debug_entry * entry ;
unsigned long flags ;
spin_lock_irqsave ( & bucket - > lock , flags ) ;
list_for_each_entry ( entry , & bucket - > list , list ) {
if ( ! dev | | dev = = entry - > dev ) {
dev_info ( entry - > dev ,
" %s idx %d P=%Lx D=%Lx L=%Lx %s \n " ,
type2name [ entry - > type ] , idx ,
( unsigned long long ) entry - > paddr ,
entry - > dev_addr , entry - > size ,
dir2name [ entry - > direction ] ) ;
}
}
spin_unlock_irqrestore ( & bucket - > lock , flags ) ;
}
}
EXPORT_SYMBOL ( debug_dma_dump_mappings ) ;
2009-01-09 14:34:49 +03:00
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}
2009-04-15 13:22:41 +04:00
static struct dma_debug_entry * __dma_entry_alloc ( void )
{
struct dma_debug_entry * entry ;
entry = list_entry ( free_entries . next , struct dma_debug_entry , list ) ;
list_del ( & entry - > list ) ;
memset ( entry , 0 , sizeof ( * entry ) ) ;
num_free_entries - = 1 ;
if ( num_free_entries < min_free_entries )
min_free_entries = num_free_entries ;
return entry ;
}
2009-01-09 14:42:46 +03:00
/* struct dma_entry allocator
*
* The next two functions implement the allocator for
* struct dma_debug_entries .
*/
static struct dma_debug_entry * dma_entry_alloc ( void )
{
struct dma_debug_entry * entry = NULL ;
unsigned long flags ;
spin_lock_irqsave ( & free_entries_lock , flags ) ;
if ( list_empty ( & free_entries ) ) {
2009-06-08 17:39:24 +04:00
pr_err ( " DMA-API: debugging out of memory - disabling \n " ) ;
2009-01-09 14:42:46 +03:00
global_disable = true ;
goto out ;
}
2009-04-15 13:22:41 +04:00
entry = __dma_entry_alloc ( ) ;
2009-01-09 14:42:46 +03:00
2009-01-19 18:52:39 +03:00
# ifdef CONFIG_STACKTRACE
entry - > stacktrace . max_entries = DMA_DEBUG_STACKTRACE_ENTRIES ;
entry - > stacktrace . entries = entry - > st_entries ;
entry - > stacktrace . skip = 2 ;
save_stack_trace ( & entry - > stacktrace ) ;
# endif
2009-01-09 14:42:46 +03:00
out :
spin_unlock_irqrestore ( & free_entries_lock , flags ) ;
return entry ;
}
static void dma_entry_free ( struct dma_debug_entry * entry )
{
unsigned long flags ;
/*
* add to beginning of the list - this way the entries are
* more likely cache hot when they are reallocated .
*/
spin_lock_irqsave ( & free_entries_lock , flags ) ;
list_add ( & entry - > list , & free_entries ) ;
num_free_entries + = 1 ;
spin_unlock_irqrestore ( & free_entries_lock , flags ) ;
}
2009-04-15 13:22:41 +04:00
/*
 * Grow or shrink the pool of preallocated debug entries to
 * @num_entries.  Returns 0 on success, 1 if the requested total could
 * not be reached (e.g. allocation failure while growing).
 */
int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		/* drop the lock while allocating - kzalloc may sleep */
		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);
2009-01-09 14:54:42 +03:00
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */
static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);

	return 0;

out_err:
	/* free everything allocated so far */
	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}
2009-05-22 23:23:13 +04:00
/*
 * debugfs read handler for "driver_filter": return the currently
 * configured driver name, or nothing when the filter is off.
 */
static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * debugfs write handler for "driver_filter": set or clear the driver
 * name filter from the first whitespace-delimited token the user wrote.
 */
static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 *
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("DMA-API: switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("DMA-API: enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}
2010-01-19 02:57:33 +03:00
static const struct file_operations filter_fops = {
2009-05-22 23:23:13 +04:00
. read = filter_read ,
. write = filter_write ,
} ;
2009-01-09 15:13:27 +03:00
static int dma_debug_fs_init ( void )
{
dma_debug_dent = debugfs_create_dir ( " dma-api " , NULL ) ;
if ( ! dma_debug_dent ) {
2009-06-08 17:39:24 +04:00
pr_err ( " DMA-API: can not create debugfs directory \n " ) ;
2009-01-09 15:13:27 +03:00
return - ENOMEM ;
}
global_disable_dent = debugfs_create_bool ( " disabled " , 0444 ,
dma_debug_dent ,
( u32 * ) & global_disable ) ;
if ( ! global_disable_dent )
goto out_err ;
error_count_dent = debugfs_create_u32 ( " error_count " , 0444 ,
dma_debug_dent , & error_count ) ;
if ( ! error_count_dent )
goto out_err ;
show_all_errors_dent = debugfs_create_u32 ( " all_errors " , 0644 ,
dma_debug_dent ,
& show_all_errors ) ;
if ( ! show_all_errors_dent )
goto out_err ;
show_num_errors_dent = debugfs_create_u32 ( " num_errors " , 0644 ,
dma_debug_dent ,
& show_num_errors ) ;
if ( ! show_num_errors_dent )
goto out_err ;
num_free_entries_dent = debugfs_create_u32 ( " num_free_entries " , 0444 ,
dma_debug_dent ,
& num_free_entries ) ;
if ( ! num_free_entries_dent )
goto out_err ;
min_free_entries_dent = debugfs_create_u32 ( " min_free_entries " , 0444 ,
dma_debug_dent ,
& min_free_entries ) ;
if ( ! min_free_entries_dent )
goto out_err ;
2009-05-22 23:23:13 +04:00
filter_dent = debugfs_create_file ( " driver_filter " , 0644 ,
dma_debug_dent , NULL , & filter_fops ) ;
if ( ! filter_dent )
goto out_err ;
2009-01-09 15:13:27 +03:00
return 0 ;
out_err :
debugfs_remove_recursive ( dma_debug_dent ) ;
return - ENOMEM ;
}
2009-05-22 19:16:04 +04:00
static int device_dma_allocations ( struct device * dev )
{
struct dma_debug_entry * entry ;
unsigned long flags ;
int count = 0 , i ;
2009-06-08 17:46:19 +04:00
local_irq_save ( flags ) ;
2009-05-22 19:16:04 +04:00
for ( i = 0 ; i < HASH_SIZE ; + + i ) {
2009-06-08 17:46:19 +04:00
spin_lock ( & dma_entry_hash [ i ] . lock ) ;
2009-05-22 19:16:04 +04:00
list_for_each_entry ( entry , & dma_entry_hash [ i ] . list , list ) {
if ( entry - > dev = = dev )
count + = 1 ;
}
2009-06-08 17:46:19 +04:00
spin_unlock ( & dma_entry_hash [ i ] . lock ) ;
2009-05-22 19:16:04 +04:00
}
2009-06-08 17:46:19 +04:00
local_irq_restore ( flags ) ;
2009-05-22 19:16:04 +04:00
return count ;
}
2009-12-31 17:16:23 +03:00
static int dma_debug_device_change ( struct notifier_block * nb , unsigned long action , void * data )
2009-05-22 19:16:04 +04:00
{
struct device * dev = data ;
int count ;
2009-12-18 03:00:36 +03:00
if ( global_disable )
2009-12-31 17:16:23 +03:00
return 0 ;
2009-05-22 19:16:04 +04:00
switch ( action ) {
case BUS_NOTIFY_UNBOUND_DRIVER :
count = device_dma_allocations ( dev ) ;
if ( count = = 0 )
break ;
err_printk ( dev , NULL , " DMA-API: device driver has pending "
" DMA allocations while released from device "
" [count=%d] \n " , count ) ;
break ;
default :
break ;
}
return 0 ;
}
2009-03-16 19:32:14 +03:00
void dma_debug_add_bus ( struct bus_type * bus )
{
2009-05-22 19:16:04 +04:00
struct notifier_block * nb ;
2009-12-18 03:00:36 +03:00
if ( global_disable )
return ;
2009-05-22 19:16:04 +04:00
nb = kzalloc ( sizeof ( struct notifier_block ) , GFP_KERNEL ) ;
if ( nb = = NULL ) {
2009-06-08 17:39:24 +04:00
pr_err ( " dma_debug_add_bus: out of memory \n " ) ;
2009-05-22 19:16:04 +04:00
return ;
}
nb - > notifier_call = dma_debug_device_change ;
bus_register_notifier ( bus , nb ) ;
2009-03-16 19:32:14 +03:00
}
2009-01-09 15:13:27 +03:00
2009-01-09 14:54:42 +03:00
/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("DMA-API: error creating debugfs entries - disabling\n");
		global_disable = true;

		return;
	}

	/* a dma_debug_entries= kernel parameter overrides the arch default */
	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		pr_err("DMA-API: debugging out of memory error - disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	pr_info("DMA-API: debugging enabled by kernel config\n");
}
2009-01-09 15:01:56 +03:00
static __init int dma_debug_cmdline ( char * str )
{
if ( ! str )
return - EINVAL ;
if ( strncmp ( str , " off " , 3 ) = = 0 ) {
2009-06-08 17:39:24 +04:00
pr_info ( " DMA-API: debugging disabled on kernel command line \n " ) ;
2009-01-09 15:01:56 +03:00
global_disable = true ;
}
return 0 ;
}
/* Parse dma_debug_entries=<n>: requested preallocation count. */
static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);
	if (!res)
		req_entries = 0;	/* fall back to the arch default */

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
2009-01-09 16:10:26 +03:00
static void check_unmap ( struct dma_debug_entry * ref )
{
struct dma_debug_entry * entry ;
struct hash_bucket * bucket ;
unsigned long flags ;
2009-03-19 04:39:31 +03:00
if ( dma_mapping_error ( ref - > dev , ref - > dev_addr ) ) {
err_printk ( ref - > dev , NULL , " DMA-API: device driver tries "
" to free an invalid DMA memory address \n " ) ;
2009-01-09 16:10:26 +03:00
return ;
2009-03-19 04:39:31 +03:00
}
2009-01-09 16:10:26 +03:00
bucket = get_hash_bucket ( ref , & flags ) ;
entry = hash_bucket_find ( bucket , ref ) ;
if ( ! entry ) {
2009-01-19 18:52:39 +03:00
err_printk ( ref - > dev , NULL , " DMA-API: device driver tries "
2009-01-09 16:10:26 +03:00
" to free DMA memory it has not allocated "
" [device address=0x%016llx] [size=%llu bytes] \n " ,
ref - > dev_addr , ref - > size ) ;
goto out ;
}
if ( ref - > size ! = entry - > size ) {
2009-01-19 18:52:39 +03:00
err_printk ( ref - > dev , entry , " DMA-API: device driver frees "
2009-01-09 16:10:26 +03:00
" DMA memory with different size "
" [device address=0x%016llx] [map size=%llu bytes] "
" [unmap size=%llu bytes] \n " ,
ref - > dev_addr , entry - > size , ref - > size ) ;
}
if ( ref - > type ! = entry - > type ) {
2009-01-19 18:52:39 +03:00
err_printk ( ref - > dev , entry , " DMA-API: device driver frees "
2009-01-09 16:10:26 +03:00
" DMA memory with wrong function "
" [device address=0x%016llx] [size=%llu bytes] "
" [mapped as %s] [unmapped as %s] \n " ,
ref - > dev_addr , ref - > size ,
type2name [ entry - > type ] , type2name [ ref - > type ] ) ;
} else if ( ( entry - > type = = dma_debug_coherent ) & &
( ref - > paddr ! = entry - > paddr ) ) {
2009-01-19 18:52:39 +03:00
err_printk ( ref - > dev , entry , " DMA-API: device driver frees "
2009-01-09 16:10:26 +03:00
" DMA memory with different CPU address "
" [device address=0x%016llx] [size=%llu bytes] "
2009-10-29 18:25:50 +03:00
" [cpu alloc address=0x%016llx] "
" [cpu free address=0x%016llx] " ,
2009-01-09 16:10:26 +03:00
ref - > dev_addr , ref - > size ,
2009-10-29 18:25:50 +03:00
( unsigned long long ) entry - > paddr ,
( unsigned long long ) ref - > paddr ) ;
2009-01-09 16:10:26 +03:00
}
if ( ref - > sg_call_ents & & ref - > type = = dma_debug_sg & &
ref - > sg_call_ents ! = entry - > sg_call_ents ) {
2009-01-19 18:52:39 +03:00
err_printk ( ref - > dev , entry , " DMA-API: device driver frees "
2009-01-09 16:10:26 +03:00
" DMA sg list with different entry count "
" [map count=%d] [unmap count=%d] \n " ,
entry - > sg_call_ents , ref - > sg_call_ents ) ;
}
/*
* This may be no bug in reality - but most implementations of the
* DMA API don ' t handle this properly , so check for it here
*/
if ( ref - > direction ! = entry - > direction ) {
2009-01-19 18:52:39 +03:00
err_printk ( ref - > dev , entry , " DMA-API: device driver frees "
2009-01-09 16:10:26 +03:00
" DMA memory with different direction "
" [device address=0x%016llx] [size=%llu bytes] "
" [mapped with %s] [unmapped with %s] \n " ,
ref - > dev_addr , ref - > size ,
dir2name [ entry - > direction ] ,
dir2name [ ref - > direction ] ) ;
}
hash_bucket_del ( entry ) ;
dma_entry_free ( entry ) ;
out :
put_hash_bucket ( bucket , & flags ) ;
}
static void check_for_stack ( struct device * dev , void * addr )
{
if ( object_is_on_stack ( addr ) )
2009-01-19 18:52:39 +03:00
err_printk ( dev , NULL , " DMA-API: device driver maps memory from "
" stack [addr=%p] \n " , addr ) ;
2009-01-09 16:10:26 +03:00
}
2009-07-10 23:38:02 +04:00
/*
 * Does [addr, addr + len) intersect the region [start, end)?
 *
 * Fix: the previous version computed b1 = a1 + len without checking for
 * wrap-around; for a range near the top of the address space b1 wrapped
 * to a tiny value and a real overlap went unreported.  A wrapping range
 * is bogus anyway, so it is conservatively reported as overlapping.
 */
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	if (b1 < a1)	/* a1 + len wrapped around - flag it */
		return true;

	return !(b1 <= a2 || a1 >= b2);
}
2009-07-10 23:38:02 +04:00
static void check_for_illegal_area ( struct device * dev , void * addr , unsigned long len )
2009-03-16 18:51:55 +03:00
{
2009-07-10 23:38:02 +04:00
if ( overlap ( addr , len , _text , _etext ) | |
overlap ( addr , len , __start_rodata , __end_rodata ) )
err_printk ( dev , NULL , " DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu] \n " , addr , len ) ;
2009-03-16 18:51:55 +03:00
}
2009-06-12 17:25:06 +04:00
/*
 * Validate a dma_sync_* call against the stored mapping: the region
 * must be known, must not exceed the allocation, and the sync direction
 * must be compatible with the mapping direction.  @to_cpu is true for
 * *_for_cpu syncs, false for *_for_device.
 */
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
				"to sync DMA memory it has not allocated "
				"[device address=0x%016llx] [size=%llu bytes]\n",
				(unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"DMA memory outside allocated range "
				"[device address=0x%016llx] "
				"[allocation size=%llu bytes] "
				"[sync offset+size=%llu]\n",
				entry->dev_addr, entry->size,
				ref->size);
	}

	/* a bidirectional mapping may legally be synced either way */
	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"DMA memory with different direction "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				(unsigned long long)ref->dev_addr, entry->size,
				dir2name[entry->direction],
				dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"device read-only DMA memory for cpu "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				(unsigned long long)ref->dev_addr, entry->size,
				dir2name[entry->direction],
				dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"device write-only DMA memory to device "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				(unsigned long long)ref->dev_addr, entry->size,
				dir2name[entry->direction],
				dir2name[ref->direction]);

out:
	put_hash_bucket(bucket, &flags);
}
2009-01-09 16:14:49 +03:00
/*
 * Record a new page/single mapping in the debug hash and sanity-check
 * the mapped address (not on a stack, not in kernel text/rodata - the
 * latter checks are only possible for lowmem pages).
 */
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->paddr     = page_to_phys(page) + offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;

	if (map_single)
		entry->type = dma_debug_single;

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
/*
 * Validate a dma_unmap_page()/dma_unmap_single() call against the
 * mapping recorded by debug_dma_map_page() and release the entry.
 */
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type      = map_single ? dma_debug_single : dma_debug_page,
		.dev       = dev,
		.dev_addr  = addr,
		.size      = size,
		.direction = direction,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
2009-01-09 16:19:54 +03:00
/*
 * Record one debug entry per mapped scatterlist element.  Both the
 * caller-supplied element count (nents) and the number the hardware
 * actually mapped (mapped_ents) are stored for later cross-checking.
 */
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;	/* pool exhausted - stop recording */

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->paddr          = sg_phys(s);
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		/* Highmem pages may have no kernel virtual address. */
		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
2009-06-12 17:25:06 +04:00
static int get_nr_mapped_entries ( struct device * dev ,
struct dma_debug_entry * ref )
2009-05-27 04:43:03 +04:00
{
2009-06-12 17:25:06 +04:00
struct dma_debug_entry * entry ;
2009-05-27 04:43:03 +04:00
struct hash_bucket * bucket ;
unsigned long flags ;
2009-06-08 17:19:29 +04:00
int mapped_ents ;
2009-05-27 04:43:03 +04:00
2009-06-12 17:25:06 +04:00
bucket = get_hash_bucket ( ref , & flags ) ;
entry = hash_bucket_find ( bucket , ref ) ;
2009-06-08 17:19:29 +04:00
mapped_ents = 0 ;
2009-05-27 04:43:03 +04:00
if ( entry )
mapped_ents = entry - > sg_mapped_ents ;
put_hash_bucket ( bucket , & flags ) ;
return mapped_ents ;
}
2009-01-09 16:19:54 +03:00
/*
 * Validate a dma_unmap_sg() call.  The number of elements really
 * mapped is looked up from the first element's debug entry, so the
 * loop can stop once all mapped elements have been checked.
 */
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int n_mapped = 0;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {
		struct dma_debug_entry ref = {
			.type         = dma_debug_sg,
			.dev          = dev,
			.paddr        = sg_phys(s),
			.dev_addr     = sg_dma_address(s),
			.size         = sg_dma_len(s),
			.direction    = dir,
			.sg_call_ents = nelems,
		};

		/* Only break once the real mapped count is known (n_mapped != 0). */
		if (n_mapped != 0 && i >= n_mapped)
			break;

		/* First iteration: fetch the mapped count before it is torn down. */
		if (!i)
			n_mapped = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
2009-01-09 16:38:50 +03:00
/*
 * Record a dma_alloc_coherent() allocation.  Coherent memory is always
 * tracked as bidirectional.
 */
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable) || unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->paddr     = virt_to_phys(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);
/*
 * Validate a dma_free_coherent() call against the entry recorded by
 * debug_dma_alloc_coherent() and release it.
 */
void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type      = dma_debug_coherent,
		.dev       = dev,
		.paddr     = virt_to_phys(virt),
		.dev_addr  = addr,
		.size      = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);
2009-01-09 16:43:04 +03:00
/*
 * Validate a dma_sync_single_for_cpu() call: the region must match a
 * known mapping and its direction must permit syncing to the CPU.
 */
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	/*
	 * Use a designated initializer so fields not listed here (paddr,
	 * sg_mapped_ents, ...) are zeroed instead of left indeterminate
	 * when lookup/compare code inspects the reference entry.
	 */
	struct dma_debug_entry ref = {
		.type         = dma_debug_single,
		.dev          = dev,
		.dev_addr     = dma_handle,
		.size         = size,
		.direction    = direction,
		.sg_call_ents = 0,
	};

	if (unlikely(global_disable))
		return;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
/*
 * Validate a dma_sync_single_for_device() call: the region must match
 * a known mapping and its direction must permit syncing to the device.
 */
void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	/*
	 * Use a designated initializer so fields not listed here (paddr,
	 * sg_mapped_ents, ...) are zeroed instead of left indeterminate
	 * when lookup/compare code inspects the reference entry.
	 */
	struct dma_debug_entry ref = {
		.type         = dma_debug_single,
		.dev          = dev,
		.dev_addr     = dma_handle,
		.size         = size,
		.direction    = direction,
		.sg_call_ents = 0,
	};

	if (unlikely(global_disable))
		return;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
2009-01-09 16:55:38 +03:00
/*
 * Validate a dma_sync_single_range_for_cpu() call.  The checked size
 * is offset + size so the sync range is verified against the end of
 * the original mapping.
 */
void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	/*
	 * Use a designated initializer so fields not listed here (paddr,
	 * sg_mapped_ents, ...) are zeroed instead of left indeterminate
	 * when lookup/compare code inspects the reference entry.
	 */
	struct dma_debug_entry ref = {
		.type         = dma_debug_single,
		.dev          = dev,
		.dev_addr     = dma_handle,
		.size         = offset + size,
		.direction    = direction,
		.sg_call_ents = 0,
	};

	if (unlikely(global_disable))
		return;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
/*
 * Validate a dma_sync_single_range_for_device() call.  The checked
 * size is offset + size so the sync range is verified against the end
 * of the original mapping.
 */
void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	/*
	 * Use a designated initializer so fields not listed here (paddr,
	 * sg_mapped_ents, ...) are zeroed instead of left indeterminate
	 * when lookup/compare code inspects the reference entry.
	 */
	struct dma_debug_entry ref = {
		.type         = dma_debug_single,
		.dev          = dev,
		.dev_addr     = dma_handle,
		.size         = offset + size,
		.direction    = direction,
		.sg_call_ents = 0,
	};

	if (unlikely(global_disable))
		return;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
2009-01-09 17:01:12 +03:00
void debug_dma_sync_sg_for_cpu ( struct device * dev , struct scatterlist * sg ,
int nelems , int direction )
{
struct scatterlist * s ;
2009-05-27 04:43:03 +04:00
int mapped_ents = 0 , i ;
2009-01-09 17:01:12 +03:00
if ( unlikely ( global_disable ) )
return ;
for_each_sg ( sg , s , nelems , i ) {
2009-06-12 17:25:06 +04:00
struct dma_debug_entry ref = {
. type = dma_debug_sg ,
. dev = dev ,
. paddr = sg_phys ( s ) ,
. dev_addr = sg_dma_address ( s ) ,
. size = sg_dma_len ( s ) ,
. direction = direction ,
. sg_call_ents = nelems ,
} ;
2009-05-27 04:43:03 +04:00
if ( ! i )
2009-06-12 17:25:06 +04:00
mapped_ents = get_nr_mapped_entries ( dev , & ref ) ;
2009-05-27 04:43:03 +04:00
if ( i > = mapped_ents )
break ;
2009-06-12 17:25:06 +04:00
check_sync ( dev , & ref , true ) ;
2009-01-09 17:01:12 +03:00
}
}
EXPORT_SYMBOL ( debug_dma_sync_sg_for_cpu ) ;
void debug_dma_sync_sg_for_device ( struct device * dev , struct scatterlist * sg ,
int nelems , int direction )
{
struct scatterlist * s ;
2009-05-27 04:43:03 +04:00
int mapped_ents = 0 , i ;
2009-01-09 17:01:12 +03:00
if ( unlikely ( global_disable ) )
return ;
for_each_sg ( sg , s , nelems , i ) {
2009-06-12 17:25:06 +04:00
struct dma_debug_entry ref = {
. type = dma_debug_sg ,
. dev = dev ,
. paddr = sg_phys ( s ) ,
. dev_addr = sg_dma_address ( s ) ,
. size = sg_dma_len ( s ) ,
. direction = direction ,
. sg_call_ents = nelems ,
} ;
2009-05-27 04:43:03 +04:00
if ( ! i )
2009-06-12 17:25:06 +04:00
mapped_ents = get_nr_mapped_entries ( dev , & ref ) ;
2009-05-27 04:43:03 +04:00
if ( i > = mapped_ents )
break ;
2009-06-12 17:25:06 +04:00
check_sync ( dev , & ref , false ) ;
2009-01-09 17:01:12 +03:00
}
}
EXPORT_SYMBOL ( debug_dma_sync_sg_for_device ) ;
2009-05-22 23:49:51 +04:00
/*
 * Parse the "dma_debug_driver=" boot parameter into
 * current_driver_name, restricting DMA-API error reports to that
 * driver.
 */
static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	/*
	 * When the parameter is NAME_MAX_LEN-1 chars or longer the loop
	 * above exits without copying a NUL; terminate explicitly rather
	 * than relying on static zero-initialization of the buffer.
	 */
	current_driver_name[NAME_MAX_LEN - 1] = 0;

	if (current_driver_name[0])
		pr_info("DMA-API: enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);