/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
2011-11-21 19:44:44 +00:00
# include <linux/debugfs.h>
2014-10-09 17:02:52 +08:00
# include <linux/device.h>
2011-09-19 14:34:02 +01:00
# include <linux/rbtree.h>
2011-11-21 19:44:44 +00:00
# include <linux/seq_file.h>
2014-10-09 17:02:52 +08:00
# include <linux/slab.h>
2011-09-19 14:34:02 +01:00
# include "internal.h"
static int regcache_rbtree_write ( struct regmap * map , unsigned int reg ,
unsigned int value ) ;
2011-11-15 13:34:40 +01:00
static int regcache_rbtree_exit ( struct regmap * map ) ;
2011-09-19 14:34:02 +01:00
struct regcache_rbtree_node {
/* block of adjacent registers */
void * block ;
2013-08-29 10:26:34 +02:00
/* Which registers are present */
long * cache_present ;
2014-04-01 13:26:48 -07:00
/* base register handled by this block */
unsigned int base_reg ;
2011-09-19 14:34:02 +01:00
/* number of registers available in the block */
unsigned int blklen ;
2014-04-01 13:26:48 -07:00
/* the actual rbtree node holding this block */
struct rb_node node ;
2011-09-19 14:34:02 +01:00
} __attribute__ ( ( packed ) ) ;
/* Per-map rbtree cache state. */
struct regcache_rbtree_ctx {
	/* root of the tree of register blocks */
	struct rb_root root;
	/* most recently used block; checked first on lookup */
	struct regcache_rbtree_node *cached_rbnode;
};
static inline void regcache_rbtree_get_base_top_reg (
2012-04-09 13:40:24 -06:00
struct regmap * map ,
2011-09-19 14:34:02 +01:00
struct regcache_rbtree_node * rbnode ,
unsigned int * base , unsigned int * top )
{
* base = rbnode - > base_reg ;
2012-04-09 13:40:24 -06:00
* top = rbnode - > base_reg + ( ( rbnode - > blklen - 1 ) * map - > reg_stride ) ;
2011-09-19 14:34:02 +01:00
}
2013-02-21 18:03:13 +00:00
static unsigned int regcache_rbtree_get_register ( struct regmap * map ,
struct regcache_rbtree_node * rbnode , unsigned int idx )
2011-09-19 14:34:02 +01:00
{
2013-02-21 18:03:13 +00:00
return regcache_get_val ( map , rbnode - > block , idx ) ;
2011-09-19 14:34:02 +01:00
}
2013-02-21 18:03:13 +00:00
static void regcache_rbtree_set_register ( struct regmap * map ,
struct regcache_rbtree_node * rbnode ,
unsigned int idx , unsigned int val )
2011-09-19 14:34:02 +01:00
{
2013-08-29 10:26:34 +02:00
set_bit ( idx , rbnode - > cache_present ) ;
2013-02-21 18:03:13 +00:00
regcache_set_val ( map , rbnode - > block , idx , val ) ;
2011-09-19 14:34:02 +01:00
}
2011-09-27 20:15:38 +02:00
static struct regcache_rbtree_node * regcache_rbtree_lookup ( struct regmap * map ,
2013-02-21 18:03:13 +00:00
unsigned int reg )
2011-09-19 14:34:02 +01:00
{
2011-09-27 20:15:38 +02:00
struct regcache_rbtree_ctx * rbtree_ctx = map - > cache ;
2011-09-19 14:34:02 +01:00
struct rb_node * node ;
struct regcache_rbtree_node * rbnode ;
unsigned int base_reg , top_reg ;
2011-09-27 20:15:38 +02:00
rbnode = rbtree_ctx - > cached_rbnode ;
if ( rbnode ) {
2012-04-09 13:40:24 -06:00
regcache_rbtree_get_base_top_reg ( map , rbnode , & base_reg ,
& top_reg ) ;
2011-09-27 20:15:38 +02:00
if ( reg > = base_reg & & reg < = top_reg )
return rbnode ;
}
node = rbtree_ctx - > root . rb_node ;
2011-09-19 14:34:02 +01:00
while ( node ) {
rbnode = container_of ( node , struct regcache_rbtree_node , node ) ;
2012-04-09 13:40:24 -06:00
regcache_rbtree_get_base_top_reg ( map , rbnode , & base_reg ,
& top_reg ) ;
2011-09-27 20:15:38 +02:00
if ( reg > = base_reg & & reg < = top_reg ) {
rbtree_ctx - > cached_rbnode = rbnode ;
2011-09-19 14:34:02 +01:00
return rbnode ;
2011-09-27 20:15:38 +02:00
} else if ( reg > top_reg ) {
2011-09-19 14:34:02 +01:00
node = node - > rb_right ;
2011-09-27 20:15:38 +02:00
} else if ( reg < base_reg ) {
2011-09-19 14:34:02 +01:00
node = node - > rb_left ;
2011-09-27 20:15:38 +02:00
}
2011-09-19 14:34:02 +01:00
}
return NULL ;
}
2012-04-09 13:40:24 -06:00
static int regcache_rbtree_insert ( struct regmap * map , struct rb_root * root ,
2011-09-19 14:34:02 +01:00
struct regcache_rbtree_node * rbnode )
{
struct rb_node * * new , * parent ;
struct regcache_rbtree_node * rbnode_tmp ;
unsigned int base_reg_tmp , top_reg_tmp ;
unsigned int base_reg ;
parent = NULL ;
new = & root - > rb_node ;
while ( * new ) {
rbnode_tmp = container_of ( * new , struct regcache_rbtree_node ,
node ) ;
/* base and top registers of the current rbnode */
2012-04-09 13:40:24 -06:00
regcache_rbtree_get_base_top_reg ( map , rbnode_tmp , & base_reg_tmp ,
2011-09-19 14:34:02 +01:00
& top_reg_tmp ) ;
/* base register of the rbnode to be added */
base_reg = rbnode - > base_reg ;
parent = * new ;
/* if this register has already been inserted, just return */
if ( base_reg > = base_reg_tmp & &
base_reg < = top_reg_tmp )
return 0 ;
else if ( base_reg > top_reg_tmp )
new = & ( ( * new ) - > rb_right ) ;
else if ( base_reg < base_reg_tmp )
new = & ( ( * new ) - > rb_left ) ;
}
/* insert the node into the rbtree */
rb_link_node ( & rbnode - > node , parent , new ) ;
rb_insert_color ( & rbnode - > node , root ) ;
return 1 ;
}
2011-11-21 19:44:44 +00:00
# ifdef CONFIG_DEBUG_FS
static int rbtree_show ( struct seq_file * s , void * ignored )
{
struct regmap * map = s - > private ;
struct regcache_rbtree_ctx * rbtree_ctx = map - > cache ;
struct regcache_rbtree_node * n ;
struct rb_node * node ;
unsigned int base , top ;
2013-03-12 17:26:49 +00:00
size_t mem_size ;
2011-11-21 19:44:44 +00:00
int nodes = 0 ;
int registers = 0 ;
2012-04-09 13:40:24 -06:00
int this_registers , average ;
2011-11-21 19:44:44 +00:00
2013-05-23 17:23:49 +02:00
map - > lock ( map - > lock_arg ) ;
2011-11-21 19:44:44 +00:00
2013-03-12 17:26:49 +00:00
mem_size = sizeof ( * rbtree_ctx ) ;
2011-11-21 19:44:44 +00:00
for ( node = rb_first ( & rbtree_ctx - > root ) ; node ! = NULL ;
node = rb_next ( node ) ) {
n = container_of ( node , struct regcache_rbtree_node , node ) ;
2013-03-12 17:26:49 +00:00
mem_size + = sizeof ( * n ) ;
mem_size + = ( n - > blklen * map - > cache_word_size ) ;
2013-08-29 10:26:34 +02:00
mem_size + = BITS_TO_LONGS ( n - > blklen ) * sizeof ( long ) ;
2011-11-21 19:44:44 +00:00
2012-04-09 13:40:24 -06:00
regcache_rbtree_get_base_top_reg ( map , n , & base , & top ) ;
this_registers = ( ( top - base ) / map - > reg_stride ) + 1 ;
seq_printf ( s , " %x-%x (%d) \n " , base , top , this_registers ) ;
2011-11-21 19:44:44 +00:00
nodes + + ;
2012-04-09 13:40:24 -06:00
registers + = this_registers ;
2011-11-21 19:44:44 +00:00
}
2012-04-04 15:48:33 -06:00
if ( nodes )
average = registers / nodes ;
else
average = 0 ;
2013-03-12 17:26:49 +00:00
seq_printf ( s , " %d nodes, %d registers, average %d registers, used %zu bytes \n " ,
nodes , registers , average , mem_size ) ;
2011-11-21 19:44:44 +00:00
2013-05-23 17:23:49 +02:00
map - > unlock ( map - > lock_arg ) ;
2011-11-21 19:44:44 +00:00
return 0 ;
}
static int rbtree_open ( struct inode * inode , struct file * file )
{
return single_open ( file , rbtree_show , inode - > i_private ) ;
}
/* Standard seq_file plumbing for the debugfs "rbtree" file. */
static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2011-11-22 11:33:31 +00:00
static void rbtree_debugfs_init ( struct regmap * map )
{
debugfs_create_file ( " rbtree " , 0400 , map - > debugfs , map , & rbtree_fops ) ;
}
2011-11-21 19:44:44 +00:00
# endif
2011-09-19 14:34:02 +01:00
static int regcache_rbtree_init ( struct regmap * map )
{
struct regcache_rbtree_ctx * rbtree_ctx ;
int i ;
int ret ;
map - > cache = kmalloc ( sizeof * rbtree_ctx , GFP_KERNEL ) ;
if ( ! map - > cache )
return - ENOMEM ;
rbtree_ctx = map - > cache ;
rbtree_ctx - > root = RB_ROOT ;
rbtree_ctx - > cached_rbnode = NULL ;
for ( i = 0 ; i < map - > num_reg_defaults ; i + + ) {
ret = regcache_rbtree_write ( map ,
map - > reg_defaults [ i ] . reg ,
map - > reg_defaults [ i ] . def ) ;
if ( ret )
goto err ;
}
return 0 ;
err :
2011-11-15 13:34:40 +01:00
regcache_rbtree_exit ( map ) ;
2011-09-19 14:34:02 +01:00
return ret ;
}
static int regcache_rbtree_exit ( struct regmap * map )
{
struct rb_node * next ;
struct regcache_rbtree_ctx * rbtree_ctx ;
struct regcache_rbtree_node * rbtree_node ;
/* if we've already been called then just return */
rbtree_ctx = map - > cache ;
if ( ! rbtree_ctx )
return 0 ;
/* free up the rbtree */
next = rb_first ( & rbtree_ctx - > root ) ;
while ( next ) {
rbtree_node = rb_entry ( next , struct regcache_rbtree_node , node ) ;
next = rb_next ( & rbtree_node - > node ) ;
rb_erase ( & rbtree_node - > node , & rbtree_ctx - > root ) ;
2013-08-29 10:26:34 +02:00
kfree ( rbtree_node - > cache_present ) ;
2011-09-19 14:34:02 +01:00
kfree ( rbtree_node - > block ) ;
kfree ( rbtree_node ) ;
}
/* release the resources */
kfree ( map - > cache ) ;
map - > cache = NULL ;
return 0 ;
}
static int regcache_rbtree_read ( struct regmap * map ,
unsigned int reg , unsigned int * value )
{
struct regcache_rbtree_node * rbnode ;
unsigned int reg_tmp ;
2011-09-27 20:15:38 +02:00
rbnode = regcache_rbtree_lookup ( map , reg ) ;
2011-09-19 14:34:02 +01:00
if ( rbnode ) {
2012-04-09 13:40:24 -06:00
reg_tmp = ( reg - rbnode - > base_reg ) / map - > reg_stride ;
2013-08-29 10:26:34 +02:00
if ( ! test_bit ( reg_tmp , rbnode - > cache_present ) )
2013-03-15 14:54:35 +00:00
return - ENOENT ;
2013-02-21 18:03:13 +00:00
* value = regcache_rbtree_get_register ( map , rbnode , reg_tmp ) ;
2011-09-19 14:34:02 +01:00
} else {
2011-10-09 13:23:31 +01:00
return - ENOENT ;
2011-09-19 14:34:02 +01:00
}
return 0 ;
}
2013-02-21 18:03:13 +00:00
static int regcache_rbtree_insert_to_block ( struct regmap * map ,
struct regcache_rbtree_node * rbnode ,
2013-08-29 10:26:33 +02:00
unsigned int base_reg ,
unsigned int top_reg ,
unsigned int reg ,
2013-02-21 18:03:13 +00:00
unsigned int value )
2011-09-19 14:34:02 +01:00
{
2013-08-29 10:26:33 +02:00
unsigned int blklen ;
unsigned int pos , offset ;
2013-08-29 10:26:34 +02:00
unsigned long * present ;
2011-09-19 14:34:02 +01:00
u8 * blk ;
2013-08-29 10:26:33 +02:00
blklen = ( top_reg - base_reg ) / map - > reg_stride + 1 ;
pos = ( reg - base_reg ) / map - > reg_stride ;
offset = ( rbnode - > base_reg - base_reg ) / map - > reg_stride ;
2011-09-19 14:34:02 +01:00
blk = krealloc ( rbnode - > block ,
2013-08-29 10:26:33 +02:00
blklen * map - > cache_word_size ,
2013-02-21 18:03:13 +00:00
GFP_KERNEL ) ;
2011-09-19 14:34:02 +01:00
if ( ! blk )
return - ENOMEM ;
2013-08-29 10:26:34 +02:00
present = krealloc ( rbnode - > cache_present ,
BITS_TO_LONGS ( blklen ) * sizeof ( * present ) , GFP_KERNEL ) ;
if ( ! present ) {
kfree ( blk ) ;
return - ENOMEM ;
}
2011-09-19 14:34:02 +01:00
/* insert the register value in the correct place in the rbnode block */
2013-08-29 10:26:34 +02:00
if ( pos = = 0 ) {
2013-08-29 10:26:33 +02:00
memmove ( blk + offset * map - > cache_word_size ,
blk , rbnode - > blklen * map - > cache_word_size ) ;
2013-08-29 10:26:34 +02:00
bitmap_shift_right ( present , present , offset , blklen ) ;
}
2011-09-19 14:34:02 +01:00
/* update the rbnode block, its size and the base register */
rbnode - > block = blk ;
2013-08-29 10:26:33 +02:00
rbnode - > blklen = blklen ;
rbnode - > base_reg = base_reg ;
2013-08-29 10:26:34 +02:00
rbnode - > cache_present = present ;
2011-09-19 14:34:02 +01:00
2013-02-21 18:03:13 +00:00
regcache_rbtree_set_register ( map , rbnode , pos , value ) ;
2011-09-19 14:34:02 +01:00
return 0 ;
}
2013-05-08 13:55:24 +01:00
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc ( struct regmap * map , unsigned int reg )
{
struct regcache_rbtree_node * rbnode ;
2013-05-08 13:55:25 +01:00
const struct regmap_range * range ;
int i ;
2013-05-08 13:55:24 +01:00
rbnode = kzalloc ( sizeof ( * rbnode ) , GFP_KERNEL ) ;
if ( ! rbnode )
return NULL ;
2013-05-08 13:55:25 +01:00
/* If there is a read table then use it to guess at an allocation */
if ( map - > rd_table ) {
for ( i = 0 ; i < map - > rd_table - > n_yes_ranges ; i + + ) {
if ( regmap_reg_in_range ( reg ,
& map - > rd_table - > yes_ranges [ i ] ) )
break ;
}
if ( i ! = map - > rd_table - > n_yes_ranges ) {
range = & map - > rd_table - > yes_ranges [ i ] ;
2013-08-27 13:03:03 +02:00
rbnode - > blklen = ( range - > range_max - range - > range_min ) /
map - > reg_stride + 1 ;
2013-05-08 13:55:25 +01:00
rbnode - > base_reg = range - > range_min ;
}
}
if ( ! rbnode - > blklen ) {
2013-08-21 17:37:22 +02:00
rbnode - > blklen = 1 ;
2013-05-08 13:55:25 +01:00
rbnode - > base_reg = reg ;
}
2013-05-08 13:55:24 +01:00
rbnode - > block = kmalloc ( rbnode - > blklen * map - > cache_word_size ,
GFP_KERNEL ) ;
2013-08-29 10:26:34 +02:00
if ( ! rbnode - > block )
goto err_free ;
rbnode - > cache_present = kzalloc ( BITS_TO_LONGS ( rbnode - > blklen ) *
sizeof ( * rbnode - > cache_present ) , GFP_KERNEL ) ;
if ( ! rbnode - > cache_present )
goto err_free_block ;
2013-05-08 13:55:24 +01:00
return rbnode ;
2013-08-29 10:26:34 +02:00
err_free_block :
kfree ( rbnode - > block ) ;
err_free :
kfree ( rbnode ) ;
return NULL ;
2013-05-08 13:55:24 +01:00
}
2011-09-19 14:34:02 +01:00
static int regcache_rbtree_write ( struct regmap * map , unsigned int reg ,
unsigned int value )
{
struct regcache_rbtree_ctx * rbtree_ctx ;
struct regcache_rbtree_node * rbnode , * rbnode_tmp ;
struct rb_node * node ;
unsigned int reg_tmp ;
int ret ;
rbtree_ctx = map - > cache ;
2013-03-15 14:54:35 +00:00
2011-09-19 14:34:02 +01:00
/* if we can't locate it in the cached rbnode we'll have
* to traverse the rbtree looking for it .
*/
2011-09-27 20:15:38 +02:00
rbnode = regcache_rbtree_lookup ( map , reg ) ;
2011-09-19 14:34:02 +01:00
if ( rbnode ) {
2012-04-09 13:40:24 -06:00
reg_tmp = ( reg - rbnode - > base_reg ) / map - > reg_stride ;
2013-02-21 18:03:13 +00:00
regcache_rbtree_set_register ( map , rbnode , reg_tmp , value ) ;
2011-09-19 14:34:02 +01:00
} else {
2013-08-29 10:26:33 +02:00
unsigned int base_reg , top_reg ;
unsigned int new_base_reg , new_top_reg ;
unsigned int min , max ;
unsigned int max_dist ;
max_dist = map - > reg_stride * sizeof ( * rbnode_tmp ) /
map - > cache_word_size ;
if ( reg < max_dist )
min = 0 ;
else
min = reg - max_dist ;
max = reg + max_dist ;
2011-09-19 14:34:02 +01:00
/* look for an adjacent register to the one we are about to add */
for ( node = rb_first ( & rbtree_ctx - > root ) ; node ;
node = rb_next ( node ) ) {
2012-04-09 13:40:24 -06:00
rbnode_tmp = rb_entry ( node , struct regcache_rbtree_node ,
node ) ;
2013-08-29 10:26:32 +02:00
regcache_rbtree_get_base_top_reg ( map , rbnode_tmp ,
& base_reg , & top_reg ) ;
2013-08-29 10:26:33 +02:00
if ( base_reg < = max & & top_reg > = min ) {
new_base_reg = min ( reg , base_reg ) ;
new_top_reg = max ( reg , top_reg ) ;
} else {
2013-08-29 10:26:32 +02:00
continue ;
2011-09-19 14:34:02 +01:00
}
2013-08-29 10:26:32 +02:00
ret = regcache_rbtree_insert_to_block ( map , rbnode_tmp ,
2013-08-29 10:26:33 +02:00
new_base_reg ,
new_top_reg , reg ,
value ) ;
2013-08-29 10:26:32 +02:00
if ( ret )
return ret ;
rbtree_ctx - > cached_rbnode = rbnode_tmp ;
return 0 ;
2011-09-19 14:34:02 +01:00
}
2013-05-08 13:55:24 +01:00
/* We did not manage to find a place to insert it in
* an existing block so create a new rbnode .
2011-09-19 14:34:02 +01:00
*/
2013-05-08 13:55:24 +01:00
rbnode = regcache_rbtree_node_alloc ( map , reg ) ;
2011-09-19 14:34:02 +01:00
if ( ! rbnode )
return - ENOMEM ;
2013-05-08 13:55:24 +01:00
regcache_rbtree_set_register ( map , rbnode ,
reg - rbnode - > base_reg , value ) ;
2012-04-09 13:40:24 -06:00
regcache_rbtree_insert ( map , & rbtree_ctx - > root , rbnode ) ;
2011-09-19 14:34:02 +01:00
rbtree_ctx - > cached_rbnode = rbnode ;
}
return 0 ;
}
2012-02-23 19:31:04 +00:00
static int regcache_rbtree_sync ( struct regmap * map , unsigned int min ,
unsigned int max )
2011-09-19 14:34:02 +01:00
{
struct regcache_rbtree_ctx * rbtree_ctx ;
struct rb_node * node ;
struct regcache_rbtree_node * rbnode ;
2013-08-27 13:03:03 +02:00
unsigned int base_reg , top_reg ;
unsigned int start , end ;
2011-09-19 14:34:02 +01:00
int ret ;
rbtree_ctx = map - > cache ;
for ( node = rb_first ( & rbtree_ctx - > root ) ; node ; node = rb_next ( node ) ) {
rbnode = rb_entry ( node , struct regcache_rbtree_node , node ) ;
2012-02-23 19:31:04 +00:00
2013-08-27 13:03:03 +02:00
regcache_rbtree_get_base_top_reg ( map , rbnode , & base_reg ,
& top_reg ) ;
if ( base_reg > max )
2012-02-23 19:31:04 +00:00
break ;
2013-08-27 13:03:03 +02:00
if ( top_reg < min )
2012-02-23 19:31:04 +00:00
continue ;
2013-08-27 13:03:03 +02:00
if ( min > base_reg )
start = ( min - base_reg ) / map - > reg_stride ;
2012-02-23 19:31:04 +00:00
else
2013-08-27 13:03:03 +02:00
start = 0 ;
2012-02-23 19:31:04 +00:00
2013-08-27 13:03:03 +02:00
if ( max < top_reg )
end = ( max - base_reg ) / map - > reg_stride + 1 ;
2012-02-23 19:31:04 +00:00
else
end = rbnode - > blklen ;
2013-08-29 10:26:34 +02:00
ret = regcache_sync_block ( map , rbnode - > block ,
rbnode - > cache_present ,
rbnode - > base_reg , start , end ) ;
2013-03-29 19:32:28 +00:00
if ( ret ! = 0 )
return ret ;
2011-09-19 14:34:02 +01:00
}
2013-03-29 19:32:28 +00:00
return regmap_async_complete ( map ) ;
2011-09-19 14:34:02 +01:00
}
2013-08-29 10:26:34 +02:00
static int regcache_rbtree_drop ( struct regmap * map , unsigned int min ,
unsigned int max )
{
struct regcache_rbtree_ctx * rbtree_ctx ;
struct regcache_rbtree_node * rbnode ;
struct rb_node * node ;
unsigned int base_reg , top_reg ;
unsigned int start , end ;
rbtree_ctx = map - > cache ;
for ( node = rb_first ( & rbtree_ctx - > root ) ; node ; node = rb_next ( node ) ) {
rbnode = rb_entry ( node , struct regcache_rbtree_node , node ) ;
regcache_rbtree_get_base_top_reg ( map , rbnode , & base_reg ,
& top_reg ) ;
if ( base_reg > max )
break ;
if ( top_reg < min )
continue ;
if ( min > base_reg )
start = ( min - base_reg ) / map - > reg_stride ;
else
start = 0 ;
if ( max < top_reg )
end = ( max - base_reg ) / map - > reg_stride + 1 ;
else
end = rbnode - > blklen ;
bitmap_clear ( rbnode - > cache_present , start , end - start ) ;
}
return 0 ;
}
2011-09-19 14:34:02 +01:00
struct regcache_ops regcache_rbtree_ops = {
. type = REGCACHE_RBTREE ,
. name = " rbtree " ,
. init = regcache_rbtree_init ,
. exit = regcache_rbtree_exit ,
2014-08-24 15:32:27 +02:00
# ifdef CONFIG_DEBUG_FS
. debugfs_init = rbtree_debugfs_init ,
# endif
2011-09-19 14:34:02 +01:00
. read = regcache_rbtree_read ,
. write = regcache_rbtree_write ,
2013-08-29 10:26:34 +02:00
. sync = regcache_rbtree_sync ,
. drop = regcache_rbtree_drop ,
2011-09-19 14:34:02 +01:00
} ;