// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	const char *name;
	struct list_head link;
};

static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
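
/*
 * Maps can be registered before debugfs itself is ready; those are parked
 * on regmap_debugfs_early_list as regmap_debugfs_node entries and picked
 * up by regmap_debugfs_initcall() once the "regmap" root directory has
 * been created.
 */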

/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}
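
/*
 * snprintf() with a NULL buffer and zero size just reports the length the
 * output would need: e.g. 0x3fff formats as "3fff", so four digits of
 * zero-padding are used for every register number in that map's dump.
 */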

static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	const char *name = "nodev";
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (map->dev && map->dev->driver)
		name = map->dev->driver->name;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};
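
/*
 * Reading the "name" file yields the owning driver's name, e.g. an
 * illustrative "wm8994\n", or "nodev\n" for a map without a device.
 */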

static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}
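
/*
 * The dump cache built below maps each contiguous block of printable
 * registers to the byte range it occupies in the fixed-format "registers"
 * file.  Worked example, assuming reg_stride = 1, debugfs_tot_len = 11 and
 * a hole at registers 4-7: block {base_reg 0, max_reg 3} covers file
 * offsets 0-43 and block {base_reg 8, ...} starts at offset 44, so a seek
 * can be resolved without rescanning every register.
 */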

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}

static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;	/* : \n */
	}
}
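
/*
 * Worked example: with max_register 0x3fff and two-byte values,
 * debugfs_reg_len = 4 and debugfs_val_len = 4, giving an 11-byte line
 * such as "3fff: beef\n" (the ": " separator and trailing newline are
 * the extra 3 bytes).
 */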

static int regmap_next_readable_reg(struct regmap *map, int reg)
{
	struct regmap_debugfs_off_cache *c;
	int ret = -EINVAL;

	if (regmap_printable(map, reg + map->reg_stride)) {
		ret = reg + map->reg_stride;
	} else {
		mutex_lock(&map->cache_lock);
		list_for_each_entry(c, &map->debugfs_off_cache, list) {
			if (reg > c->max_reg)
				continue;
			if (reg < c->base_reg) {
				ret = c->base_reg;
				break;
			}
		}
		mutex_unlock(&map->cache_lock);
	}

	return ret;
}
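
/*
 * The dump loop below advances with regmap_next_readable_reg(), which
 * fast-paths the case where the next stride-aligned register is printable
 * and otherwise jumps over holes via the offset cache; its -EINVAL return
 * past the last printable block terminates the loop.
 */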
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;
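
	/*
	 * PAGE_SIZE << (MAX_ORDER - 1) is the largest physically contiguous
	 * allocation the page allocator can provide, so clamp oversized
	 * requests to keep the kmalloc() below satisfiable rather than
	 * failing the read.
	 */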
	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;
out:
	kfree(buf);
	return ret;
}
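
/*
 * Illustrative userspace view of the fixed-format dump (the directory
 * name depends on the device):
 *
 *   # cat /sys/kernel/debug/regmap/spi0.0/registers
 *   00: 1234
 *   01: XXXX
 *   02: abcd
 */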

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * Writing to hardware registers can be dangerous, especially for clients
 * such as PMICs, so no real compile-time configuration option is provided
 * for this feature; anyone who wants it must modify the source directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
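
/*
 * With the kernel rebuilt and REGMAP_ALLOW_WRITE_DEBUGFS defined, the
 * "registers" file accepts "<reg> <value>" in hex, e.g. (illustrative):
 *
 *   # echo "4 1" > registers
 *
 * which writes 0x1 to register 0x4 and taints the kernel with TAINT_USER.
 */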

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/*
	 * While we are at it, build the register dump cache
	 * now so the read() operation on the 'registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks.
	 */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/*
	 * Reset file pointer as the fixed-format of the 'registers'
	 * file is not compatible with the 'range' file.
	 */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
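
/*
 * The "range" file emits one "base-max\n" line per cached block of
 * printable registers, e.g. "0-3\n8-f\n" (illustrative, reg_stride = 1)
 * for a map with a hole at registers 4-7.
 */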

static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(regmap_access);
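
/*
 * Each "access" line reports the readable/writeable/volatile/precious
 * flags in that order, e.g. (illustrative):
 *
 *   00: y y n n
 *   01: y n y n
 */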

static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};
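
/*
 * cache_only reads back as a bool via debugfs_read_file_bool(); writes
 * take the map lock so in-flight accesses settle first, and clearing the
 * flag triggers a regcache_sync() so the hardware catches up with writes
 * made while only the cache was updated.
 */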

static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}
	map->cache_bypass = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	return count;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};
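
/*
 * Sketch of the per-map directory created below under
 * /sys/kernel/debug/regmap/<devname>[-<name>]/ (entries appear only when
 * applicable):
 *
 *   name  range  registers  access  cache_only  cache_dirty  cache_bypass
 *
 * plus one file per named register range.
 */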

void regmap_debugfs_init(struct regmap *map, const char *name)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case.  For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		node->name = name;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);
		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
					      dummy_index);
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map, node->name);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}