2011-07-21 01:56:53 +04:00
/*
 * Register map access API - debugfs
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
# include <linux/slab.h>
# include <linux/mutex.h>
# include <linux/debugfs.h>
# include <linux/uaccess.h>
2012-01-22 20:23:42 +04:00
# include <linux/device.h>
2013-10-24 16:03:41 +04:00
# include <linux/list.h>
2011-07-21 01:56:53 +04:00
# include "internal.h"
2013-10-24 16:03:41 +04:00
/*
 * Bookkeeping for maps registered before the debugfs root directory
 * exists; their initialisation is replayed from regmap_debugfs_initcall().
 */
struct regmap_debugfs_node {
	struct regmap *map;	/* map whose debugfs init was deferred */
	const char *name;	/* optional name passed to regmap_debugfs_init() */
	struct list_head link;	/* entry in regmap_debugfs_early_list */
};
2011-07-21 01:56:53 +04:00
/* Top-level "regmap" directory in debugfs; NULL until the initcall runs */
static struct dentry *regmap_debugfs_root;

/* Maps that registered before the debugfs root existed, and its lock */
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
2011-07-21 01:56:53 +04:00
2011-08-10 12:15:31 +04:00
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
	/*
	 * snprintf() with a NULL buffer returns the number of characters
	 * that would have been printed, i.e. the hex width of max_val.
	 * The format must be exactly "%x": padding spaces in the format
	 * would inflate the computed width and misalign every dump line.
	 */
	return snprintf(NULL, 0, "%x", max_val);
}
2012-02-22 18:20:09 +04:00
static ssize_t regmap_name_read_file ( struct file * file ,
char __user * user_buf , size_t count ,
loff_t * ppos )
{
struct regmap * map = file - > private_data ;
2018-02-20 00:43:01 +03:00
const char * name = " nodev " ;
2012-02-22 18:20:09 +04:00
int ret ;
char * buf ;
buf = kmalloc ( PAGE_SIZE , GFP_KERNEL ) ;
if ( ! buf )
return - ENOMEM ;
2018-02-20 00:43:01 +03:00
if ( map - > dev & & map - > dev - > driver )
name = map - > dev - > driver - > name ;
ret = snprintf ( buf , PAGE_SIZE , " %s \n " , name ) ;
2012-02-22 18:20:09 +04:00
if ( ret < 0 ) {
kfree ( buf ) ;
return ret ;
}
ret = simple_read_from_buffer ( user_buf , count , ppos , buf , ret ) ;
kfree ( buf ) ;
return ret ;
}
static const struct file_operations regmap_name_fops = {
2012-04-06 01:25:11 +04:00
. open = simple_open ,
2012-02-22 18:20:09 +04:00
. read = regmap_name_read_file ,
. llseek = default_llseek ,
} ;
2013-01-08 17:35:58 +04:00
static void regmap_debugfs_free_dump_cache ( struct regmap * map )
{
struct regmap_debugfs_off_cache * c ;
while ( ! list_empty ( & map - > debugfs_off_cache ) ) {
c = list_first_entry ( & map - > debugfs_off_cache ,
struct regmap_debugfs_off_cache ,
list ) ;
list_del ( & c - > list ) ;
kfree ( c ) ;
}
}
2016-08-08 18:44:22 +03:00
/*
 * A register appears in the dump when it is not precious and is either
 * readable from the hardware or present in the register cache.
 */
static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	return regmap_readable(map, reg) || regmap_cached(map, reg);
}
2012-12-09 12:20:10 +04:00
/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 *
 * Builds (once, lazily) a cache of contiguous printable register blocks,
 * each annotated with the file-offset range it occupies, then translates
 * the file offset 'from' into the first register of the matching block.
 * *pos is rounded back to the start of that register's line.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		/* Fall back to the end of the last block we passed */
		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
2013-02-11 14:50:59 +04:00
/*
 * Work out (once, cached in the map) the fixed line length used by the
 * register dump: hex register number + ": " + hex value + "\n".
 */
static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		/*
		 * Fix: the first assignment was terminated with a comma
		 * operator rather than a semicolon; it only worked by
		 * accident of the comma expression's evaluation order.
		 */
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;	/* : \n */
	}
}
2012-10-03 15:45:37 +04:00
/*
 * Common dump helper for the "registers" and named-range files: format
 * registers [from, to] as "reg: value" lines into a kernel buffer and
 * copy the window selected by *ppos/count out to userspace.
 */
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i <= to; i += map->reg_stride) {
		if (!regmap_readable(map, i) && !regmap_cached(map, i))
			continue;

		if (regmap_precious(map, i))
			continue;

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register (offset relative to 'from') */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;
out:
	kfree(buf);
	return ret;
}
2012-10-03 15:45:37 +04:00
/* Read handler for "registers": dump the whole register map */
static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}
2012-02-22 16:43:50 +04:00
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	/* Parse "<reg> <value>" in hex, skipping leading spaces */
	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
2011-07-21 01:56:53 +04:00
static const struct file_operations regmap_map_fops = {
2012-04-06 01:25:11 +04:00
. open = simple_open ,
2011-07-21 01:56:53 +04:00
. read = regmap_map_read_file ,
2012-02-22 16:43:50 +04:00
. write = regmap_map_write_file ,
2011-07-21 01:56:53 +04:00
. llseek = default_llseek ,
} ;
2012-10-03 16:13:16 +04:00
/* Read handler for a named range file: dump only that range's registers */
static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};
2013-02-20 16:15:23 +04:00
/*
 * Read handler for the "range" debugfs file: list the contiguous blocks
 * of printable registers as "base-max" lines, one per cache entry.
 */
static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
2015-10-20 17:40:59 +03:00
static int regmap_access_show ( struct seq_file * s , void * ignored )
2011-08-10 12:28:04 +04:00
{
2015-10-20 17:40:59 +03:00
struct regmap * map = s - > private ;
int i , reg_len ;
2011-08-10 12:28:04 +04:00
2015-09-19 17:31:47 +03:00
reg_len = regmap_calc_reg_len ( map - > max_register ) ;
2011-08-10 12:28:04 +04:00
2012-04-09 23:40:24 +04:00
for ( i = 0 ; i < = map - > max_register ; i + = map - > reg_stride ) {
2011-08-10 12:28:04 +04:00
/* Ignore registers which are neither readable nor writable */
if ( ! regmap_readable ( map , i ) & & ! regmap_writeable ( map , i ) )
continue ;
2015-10-20 17:40:59 +03:00
/* Format the register */
seq_printf ( s , " %.*x: %c %c %c %c \n " , reg_len , i ,
regmap_readable ( map , i ) ? ' y ' : ' n ' ,
regmap_writeable ( map , i ) ? ' y ' : ' n ' ,
regmap_volatile ( map , i ) ? ' y ' : ' n ' ,
regmap_precious ( map , i ) ? ' y ' : ' n ' ) ;
2011-08-10 12:28:04 +04:00
}
2015-10-20 17:40:59 +03:00
return 0 ;
}
2011-08-10 12:28:04 +04:00
2015-10-20 17:40:59 +03:00
static int access_open ( struct inode * inode , struct file * file )
{
return single_open ( file , regmap_access_show , inode - > i_private ) ;
2011-08-10 12:28:04 +04:00
}
static const struct file_operations regmap_access_fops = {
2015-10-20 17:40:59 +03:00
. open = access_open ,
. read = seq_read ,
. llseek = seq_lseek ,
. release = single_release ,
2011-08-10 12:28:04 +04:00
} ;
2011-07-21 01:56:53 +04:00
2015-06-23 16:32:55 +03:00
/*
 * Write handler for "cache_only": toggle the flag under the map lock,
 * taint the kernel when forcing it on, and sync the cache back to the
 * hardware when it is forced off.
 */
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	ssize_t result;
	bool was_enabled, require_sync = false;
	int err;

	map->lock(map->lock_arg);

	was_enabled = map->cache_only;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0) {
		map->unlock(map->lock_arg);
		return result;
	}

	if (map->cache_only && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_only && was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}

	map->unlock(map->lock_arg);

	/* Sync outside the map lock: regcache_sync() takes it itself */
	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return result;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};
/*
 * Write handler for "cache_bypass": toggle the flag under the map lock
 * and taint the kernel when forcing it on.
 */
static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	ssize_t result;
	bool was_enabled;

	map->lock(map->lock_arg);

	was_enabled = map->cache_bypass;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0)
		goto out;

	if (map->cache_bypass && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_bypass && was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}

out:
	map->unlock(map->lock_arg);

	return result;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};
2012-04-05 01:48:29 +04:00
void regmap_debugfs_init ( struct regmap * map , const char * name )
2011-07-21 01:56:53 +04:00
{
2012-10-03 16:13:16 +04:00
struct rb_node * next ;
struct regmap_range_node * range_node ;
2014-09-28 07:35:25 +04:00
const char * devname = " dummy " ;
2012-10-03 16:13:16 +04:00
2017-12-22 20:42:08 +03:00
/*
* Userspace can initiate reads from the hardware over debugfs .
* Normally internal regmap structures and buffers are protected with
* a mutex or a spinlock , but if the regmap owner decided to disable
* all locking mechanisms , this is no longer the case . For safety :
* don ' t create the debugfs entries if locking is disabled .
*/
2017-12-21 14:12:50 +03:00
if ( map - > debugfs_disable ) {
dev_dbg ( map - > dev , " regmap locking disabled - not creating debugfs entries \n " ) ;
2017-12-12 19:56:43 +03:00
return ;
2017-12-21 14:12:50 +03:00
}
2017-12-12 19:56:43 +03:00
2013-10-24 16:03:41 +04:00
/* If we don't have the debugfs root yet, postpone init */
if ( ! regmap_debugfs_root ) {
struct regmap_debugfs_node * node ;
node = kzalloc ( sizeof ( * node ) , GFP_KERNEL ) ;
if ( ! node )
return ;
node - > map = map ;
node - > name = name ;
mutex_lock ( & regmap_debugfs_early_lock ) ;
list_add ( & node - > link , & regmap_debugfs_early_list ) ;
mutex_unlock ( & regmap_debugfs_early_lock ) ;
return ;
}
2012-12-10 20:24:29 +04:00
INIT_LIST_HEAD ( & map - > debugfs_off_cache ) ;
2013-02-20 16:15:23 +04:00
mutex_init ( & map - > cache_lock ) ;
2012-12-10 20:24:29 +04:00
2014-09-28 07:35:25 +04:00
if ( map - > dev )
devname = dev_name ( map - > dev ) ;
2012-04-05 01:48:29 +04:00
if ( name ) {
map - > debugfs_name = kasprintf ( GFP_KERNEL , " %s-%s " ,
2014-09-28 07:35:25 +04:00
devname , name ) ;
2012-04-05 01:48:29 +04:00
name = map - > debugfs_name ;
} else {
2014-09-28 07:35:25 +04:00
name = devname ;
2012-04-05 01:48:29 +04:00
}
map - > debugfs = debugfs_create_dir ( name , regmap_debugfs_root ) ;
2011-07-21 01:56:53 +04:00
if ( ! map - > debugfs ) {
dev_warn ( map - > dev , " Failed to create debugfs directory \n " ) ;
return ;
}
2012-02-22 18:20:09 +04:00
debugfs_create_file ( " name " , 0400 , map - > debugfs ,
map , & regmap_name_fops ) ;
2013-02-20 16:15:23 +04:00
debugfs_create_file ( " range " , 0400 , map - > debugfs ,
map , & regmap_reg_ranges_fops ) ;
2014-01-30 17:26:32 +04:00
if ( map - > max_register | | regmap_readable ( map , 0 ) ) {
2014-09-08 10:43:37 +04:00
umode_t registers_mode ;
2015-08-06 05:35:23 +03:00
# if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
registers_mode = 0600 ;
# else
registers_mode = 0400 ;
# endif
2014-09-08 10:43:37 +04:00
debugfs_create_file ( " registers " , registers_mode , map - > debugfs ,
2011-07-21 01:56:53 +04:00
map , & regmap_map_fops ) ;
2011-08-10 12:28:04 +04:00
debugfs_create_file ( " access " , 0400 , map - > debugfs ,
map , & regmap_access_fops ) ;
}
2012-02-06 22:02:06 +04:00
if ( map - > cache_type ) {
2015-06-23 16:32:55 +03:00
debugfs_create_file ( " cache_only " , 0600 , map - > debugfs ,
& map - > cache_only , & regmap_cache_only_fops ) ;
2012-02-06 22:02:06 +04:00
debugfs_create_bool ( " cache_dirty " , 0400 , map - > debugfs ,
& map - > cache_dirty ) ;
2015-06-23 16:32:55 +03:00
debugfs_create_file ( " cache_bypass " , 0600 , map - > debugfs ,
& map - > cache_bypass ,
& regmap_cache_bypass_fops ) ;
2012-02-06 22:02:06 +04:00
}
2012-10-03 16:13:16 +04:00
next = rb_first ( & map - > range_tree ) ;
while ( next ) {
range_node = rb_entry ( next , struct regmap_range_node , node ) ;
if ( range_node - > name )
debugfs_create_file ( range_node - > name , 0400 ,
map - > debugfs , range_node ,
& regmap_range_fops ) ;
next = rb_next ( & range_node - > node ) ;
}
2014-08-24 17:32:27 +04:00
if ( map - > cache_ops & & map - > cache_ops - > debugfs_init )
map - > cache_ops - > debugfs_init ( map ) ;
2011-07-21 01:56:53 +04:00
}
void regmap_debugfs_exit ( struct regmap * map )
{
2013-10-24 16:03:41 +04:00
if ( map - > debugfs ) {
debugfs_remove_recursive ( map - > debugfs ) ;
mutex_lock ( & map - > cache_lock ) ;
regmap_debugfs_free_dump_cache ( map ) ;
mutex_unlock ( & map - > cache_lock ) ;
kfree ( map - > debugfs_name ) ;
} else {
struct regmap_debugfs_node * node , * tmp ;
mutex_lock ( & regmap_debugfs_early_lock ) ;
list_for_each_entry_safe ( node , tmp , & regmap_debugfs_early_list ,
link ) {
if ( node - > map = = map ) {
list_del ( & node - > link ) ;
kfree ( node ) ;
}
}
mutex_unlock ( & regmap_debugfs_early_lock ) ;
}
2011-07-21 01:56:53 +04:00
}
void regmap_debugfs_initcall ( void )
{
2013-10-24 16:03:41 +04:00
struct regmap_debugfs_node * node , * tmp ;
2011-07-21 01:56:53 +04:00
regmap_debugfs_root = debugfs_create_dir ( " regmap " , NULL ) ;
if ( ! regmap_debugfs_root ) {
pr_warn ( " regmap: Failed to create debugfs root \n " ) ;
return ;
}
2013-10-24 16:03:41 +04:00
mutex_lock ( & regmap_debugfs_early_lock ) ;
list_for_each_entry_safe ( node , tmp , & regmap_debugfs_early_list , link ) {
regmap_debugfs_init ( node - > map , node - > name ) ;
list_del ( & node - > link ) ;
kfree ( node ) ;
}
mutex_unlock ( & regmap_debugfs_early_lock ) ;
2011-07-21 01:56:53 +04:00
}