2011-09-19 14:34:02 +01:00
/*
* Register cache access API - rbtree caching support
*
* Copyright 2011 Wolfson Microelectronics plc
*
* Author : Dimitris Papastamos < dp @ opensource . wolfsonmicro . com >
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation .
*/
# include <linux/slab.h>
2012-01-22 11:23:42 -05:00
# include <linux/device.h>
2011-11-21 19:44:44 +00:00
# include <linux/debugfs.h>
2011-09-19 14:34:02 +01:00
# include <linux/rbtree.h>
2011-11-21 19:44:44 +00:00
# include <linux/seq_file.h>
2011-09-19 14:34:02 +01:00
# include "internal.h"
/* Forward declarations: init() seeds the cache via write() and unwinds
 * through exit() on failure, so both are needed before their definitions.
 */
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);
/* One contiguous run of cached registers, kept as a node in the rbtree. */
struct regcache_rbtree_node {
	/* the actual rbtree node holding this block */
	struct rb_node node;
	/* base register handled by this block */
	unsigned int base_reg;
	/* block of adjacent registers */
	void *block;
	/* number of registers available in the block */
	unsigned int blklen;
} __attribute__ ((packed));

/* Per-map cache state: the tree of blocks plus a one-entry lookup cache. */
struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};
static inline void regcache_rbtree_get_base_top_reg (
2012-04-09 13:40:24 -06:00
struct regmap * map ,
2011-09-19 14:34:02 +01:00
struct regcache_rbtree_node * rbnode ,
unsigned int * base , unsigned int * top )
{
* base = rbnode - > base_reg ;
2012-04-09 13:40:24 -06:00
* top = rbnode - > base_reg + ( ( rbnode - > blklen - 1 ) * map - > reg_stride ) ;
2011-09-19 14:34:02 +01:00
}
2013-02-21 18:03:13 +00:00
static unsigned int regcache_rbtree_get_register ( struct regmap * map ,
struct regcache_rbtree_node * rbnode , unsigned int idx )
2011-09-19 14:34:02 +01:00
{
2013-02-21 18:03:13 +00:00
return regcache_get_val ( map , rbnode - > block , idx ) ;
2011-09-19 14:34:02 +01:00
}
2013-02-21 18:03:13 +00:00
static void regcache_rbtree_set_register ( struct regmap * map ,
struct regcache_rbtree_node * rbnode ,
unsigned int idx , unsigned int val )
2011-09-19 14:34:02 +01:00
{
2013-02-21 18:03:13 +00:00
regcache_set_val ( map , rbnode - > block , idx , val ) ;
2011-09-19 14:34:02 +01:00
}
2011-09-27 20:15:38 +02:00
static struct regcache_rbtree_node * regcache_rbtree_lookup ( struct regmap * map ,
2013-02-21 18:03:13 +00:00
unsigned int reg )
2011-09-19 14:34:02 +01:00
{
2011-09-27 20:15:38 +02:00
struct regcache_rbtree_ctx * rbtree_ctx = map - > cache ;
2011-09-19 14:34:02 +01:00
struct rb_node * node ;
struct regcache_rbtree_node * rbnode ;
unsigned int base_reg , top_reg ;
2011-09-27 20:15:38 +02:00
rbnode = rbtree_ctx - > cached_rbnode ;
if ( rbnode ) {
2012-04-09 13:40:24 -06:00
regcache_rbtree_get_base_top_reg ( map , rbnode , & base_reg ,
& top_reg ) ;
2011-09-27 20:15:38 +02:00
if ( reg > = base_reg & & reg < = top_reg )
return rbnode ;
}
node = rbtree_ctx - > root . rb_node ;
2011-09-19 14:34:02 +01:00
while ( node ) {
rbnode = container_of ( node , struct regcache_rbtree_node , node ) ;
2012-04-09 13:40:24 -06:00
regcache_rbtree_get_base_top_reg ( map , rbnode , & base_reg ,
& top_reg ) ;
2011-09-27 20:15:38 +02:00
if ( reg > = base_reg & & reg < = top_reg ) {
rbtree_ctx - > cached_rbnode = rbnode ;
2011-09-19 14:34:02 +01:00
return rbnode ;
2011-09-27 20:15:38 +02:00
} else if ( reg > top_reg ) {
2011-09-19 14:34:02 +01:00
node = node - > rb_right ;
2011-09-27 20:15:38 +02:00
} else if ( reg < base_reg ) {
2011-09-19 14:34:02 +01:00
node = node - > rb_left ;
2011-09-27 20:15:38 +02:00
}
2011-09-19 14:34:02 +01:00
}
return NULL ;
}
2012-04-09 13:40:24 -06:00
static int regcache_rbtree_insert ( struct regmap * map , struct rb_root * root ,
2011-09-19 14:34:02 +01:00
struct regcache_rbtree_node * rbnode )
{
struct rb_node * * new , * parent ;
struct regcache_rbtree_node * rbnode_tmp ;
unsigned int base_reg_tmp , top_reg_tmp ;
unsigned int base_reg ;
parent = NULL ;
new = & root - > rb_node ;
while ( * new ) {
rbnode_tmp = container_of ( * new , struct regcache_rbtree_node ,
node ) ;
/* base and top registers of the current rbnode */
2012-04-09 13:40:24 -06:00
regcache_rbtree_get_base_top_reg ( map , rbnode_tmp , & base_reg_tmp ,
2011-09-19 14:34:02 +01:00
& top_reg_tmp ) ;
/* base register of the rbnode to be added */
base_reg = rbnode - > base_reg ;
parent = * new ;
/* if this register has already been inserted, just return */
if ( base_reg > = base_reg_tmp & &
base_reg < = top_reg_tmp )
return 0 ;
else if ( base_reg > top_reg_tmp )
new = & ( ( * new ) - > rb_right ) ;
else if ( base_reg < base_reg_tmp )
new = & ( ( * new ) - > rb_left ) ;
}
/* insert the node into the rbtree */
rb_link_node ( & rbnode - > node , parent , new ) ;
rb_insert_color ( & rbnode - > node , root ) ;
return 1 ;
}
2011-11-21 19:44:44 +00:00
#ifdef CONFIG_DEBUG_FS
/*
 * Dump the cache layout under debugfs: one "base-top (count)" line per
 * block, followed by totals including the approximate memory footprint.
 */
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	/* context struct plus the register-present bitmap */
	mem_size = sizeof(*rbtree_ctx);
	mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Expose the cache layout as a read-only "rbtree" debugfs file. */
static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif
2011-09-19 14:34:02 +01:00
static int regcache_rbtree_init ( struct regmap * map )
{
struct regcache_rbtree_ctx * rbtree_ctx ;
int i ;
int ret ;
map - > cache = kmalloc ( sizeof * rbtree_ctx , GFP_KERNEL ) ;
if ( ! map - > cache )
return - ENOMEM ;
rbtree_ctx = map - > cache ;
rbtree_ctx - > root = RB_ROOT ;
rbtree_ctx - > cached_rbnode = NULL ;
for ( i = 0 ; i < map - > num_reg_defaults ; i + + ) {
ret = regcache_rbtree_write ( map ,
map - > reg_defaults [ i ] . reg ,
map - > reg_defaults [ i ] . def ) ;
if ( ret )
goto err ;
}
2011-11-22 11:33:31 +00:00
rbtree_debugfs_init ( map ) ;
2011-11-21 19:44:44 +00:00
2011-09-19 14:34:02 +01:00
return 0 ;
err :
2011-11-15 13:34:40 +01:00
regcache_rbtree_exit ( map ) ;
2011-09-19 14:34:02 +01:00
return ret ;
}
static int regcache_rbtree_exit ( struct regmap * map )
{
struct rb_node * next ;
struct regcache_rbtree_ctx * rbtree_ctx ;
struct regcache_rbtree_node * rbtree_node ;
/* if we've already been called then just return */
rbtree_ctx = map - > cache ;
if ( ! rbtree_ctx )
return 0 ;
/* free up the rbtree */
next = rb_first ( & rbtree_ctx - > root ) ;
while ( next ) {
rbtree_node = rb_entry ( next , struct regcache_rbtree_node , node ) ;
next = rb_next ( & rbtree_node - > node ) ;
rb_erase ( & rbtree_node - > node , & rbtree_ctx - > root ) ;
kfree ( rbtree_node - > block ) ;
kfree ( rbtree_node ) ;
}
/* release the resources */
kfree ( map - > cache ) ;
map - > cache = NULL ;
return 0 ;
}
static int regcache_rbtree_read ( struct regmap * map ,
unsigned int reg , unsigned int * value )
{
struct regcache_rbtree_node * rbnode ;
unsigned int reg_tmp ;
2011-09-27 20:15:38 +02:00
rbnode = regcache_rbtree_lookup ( map , reg ) ;
2011-09-19 14:34:02 +01:00
if ( rbnode ) {
2012-04-09 13:40:24 -06:00
reg_tmp = ( reg - rbnode - > base_reg ) / map - > reg_stride ;
2013-03-15 14:54:35 +00:00
if ( ! regcache_reg_present ( map , reg ) )
return - ENOENT ;
2013-02-21 18:03:13 +00:00
* value = regcache_rbtree_get_register ( map , rbnode , reg_tmp ) ;
2011-09-19 14:34:02 +01:00
} else {
2011-10-09 13:23:31 +01:00
return - ENOENT ;
2011-09-19 14:34:02 +01:00
}
return 0 ;
}
2013-02-21 18:03:13 +00:00
static int regcache_rbtree_insert_to_block ( struct regmap * map ,
struct regcache_rbtree_node * rbnode ,
2011-09-19 14:34:02 +01:00
unsigned int pos , unsigned int reg ,
2013-02-21 18:03:13 +00:00
unsigned int value )
2011-09-19 14:34:02 +01:00
{
u8 * blk ;
blk = krealloc ( rbnode - > block ,
2013-02-21 18:03:13 +00:00
( rbnode - > blklen + 1 ) * map - > cache_word_size ,
GFP_KERNEL ) ;
2011-09-19 14:34:02 +01:00
if ( ! blk )
return - ENOMEM ;
/* insert the register value in the correct place in the rbnode block */
2013-02-21 18:03:13 +00:00
memmove ( blk + ( pos + 1 ) * map - > cache_word_size ,
blk + pos * map - > cache_word_size ,
( rbnode - > blklen - pos ) * map - > cache_word_size ) ;
2011-09-19 14:34:02 +01:00
/* update the rbnode block, its size and the base register */
rbnode - > block = blk ;
rbnode - > blklen + + ;
if ( ! pos )
rbnode - > base_reg = reg ;
2013-02-21 18:03:13 +00:00
regcache_rbtree_set_register ( map , rbnode , pos , value ) ;
2011-09-19 14:34:02 +01:00
return 0 ;
}
2013-05-08 13:55:24 +01:00
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc ( struct regmap * map , unsigned int reg )
{
struct regcache_rbtree_node * rbnode ;
2013-05-08 13:55:25 +01:00
const struct regmap_range * range ;
int i ;
2013-05-08 13:55:24 +01:00
rbnode = kzalloc ( sizeof ( * rbnode ) , GFP_KERNEL ) ;
if ( ! rbnode )
return NULL ;
2013-05-08 13:55:25 +01:00
/* If there is a read table then use it to guess at an allocation */
if ( map - > rd_table ) {
for ( i = 0 ; i < map - > rd_table - > n_yes_ranges ; i + + ) {
if ( regmap_reg_in_range ( reg ,
& map - > rd_table - > yes_ranges [ i ] ) )
break ;
}
if ( i ! = map - > rd_table - > n_yes_ranges ) {
range = & map - > rd_table - > yes_ranges [ i ] ;
rbnode - > blklen = range - > range_max - range - > range_min
+ 1 ;
rbnode - > base_reg = range - > range_min ;
}
}
if ( ! rbnode - > blklen ) {
rbnode - > blklen = sizeof ( * rbnode ) ;
rbnode - > base_reg = reg ;
}
2013-05-08 13:55:24 +01:00
rbnode - > block = kmalloc ( rbnode - > blklen * map - > cache_word_size ,
GFP_KERNEL ) ;
if ( ! rbnode - > block ) {
kfree ( rbnode ) ;
return NULL ;
}
return rbnode ;
}
2011-09-19 14:34:02 +01:00
static int regcache_rbtree_write ( struct regmap * map , unsigned int reg ,
unsigned int value )
{
struct regcache_rbtree_ctx * rbtree_ctx ;
struct regcache_rbtree_node * rbnode , * rbnode_tmp ;
struct rb_node * node ;
unsigned int reg_tmp ;
unsigned int pos ;
int i ;
int ret ;
rbtree_ctx = map - > cache ;
2013-03-15 14:54:35 +00:00
/* update the reg_present bitmap, make space if necessary */
2013-03-29 19:18:59 +00:00
ret = regcache_set_reg_present ( map , reg ) ;
2013-03-15 14:54:35 +00:00
if ( ret < 0 )
return ret ;
2011-09-19 14:34:02 +01:00
/* if we can't locate it in the cached rbnode we'll have
* to traverse the rbtree looking for it .
*/
2011-09-27 20:15:38 +02:00
rbnode = regcache_rbtree_lookup ( map , reg ) ;
2011-09-19 14:34:02 +01:00
if ( rbnode ) {
2012-04-09 13:40:24 -06:00
reg_tmp = ( reg - rbnode - > base_reg ) / map - > reg_stride ;
2013-02-21 18:03:13 +00:00
regcache_rbtree_set_register ( map , rbnode , reg_tmp , value ) ;
2011-09-19 14:34:02 +01:00
} else {
/* look for an adjacent register to the one we are about to add */
for ( node = rb_first ( & rbtree_ctx - > root ) ; node ;
node = rb_next ( node ) ) {
2012-04-09 13:40:24 -06:00
rbnode_tmp = rb_entry ( node , struct regcache_rbtree_node ,
node ) ;
2011-09-19 14:34:02 +01:00
for ( i = 0 ; i < rbnode_tmp - > blklen ; i + + ) {
2012-04-09 13:40:24 -06:00
reg_tmp = rbnode_tmp - > base_reg +
( i * map - > reg_stride ) ;
if ( abs ( reg_tmp - reg ) ! = map - > reg_stride )
2011-09-19 14:34:02 +01:00
continue ;
/* decide where in the block to place our register */
2012-04-09 13:40:24 -06:00
if ( reg_tmp + map - > reg_stride = = reg )
2011-09-19 14:34:02 +01:00
pos = i + 1 ;
else
pos = i ;
2013-02-21 18:03:13 +00:00
ret = regcache_rbtree_insert_to_block ( map ,
rbnode_tmp ,
pos , reg ,
value ) ;
2011-09-19 14:34:02 +01:00
if ( ret )
return ret ;
rbtree_ctx - > cached_rbnode = rbnode_tmp ;
return 0 ;
}
}
2013-05-08 13:55:24 +01:00
/* We did not manage to find a place to insert it in
* an existing block so create a new rbnode .
2011-09-19 14:34:02 +01:00
*/
2013-05-08 13:55:24 +01:00
rbnode = regcache_rbtree_node_alloc ( map , reg ) ;
2011-09-19 14:34:02 +01:00
if ( ! rbnode )
return - ENOMEM ;
2013-05-08 13:55:24 +01:00
regcache_rbtree_set_register ( map , rbnode ,
reg - rbnode - > base_reg , value ) ;
2012-04-09 13:40:24 -06:00
regcache_rbtree_insert ( map , & rbtree_ctx - > root , rbnode ) ;
2011-09-19 14:34:02 +01:00
rbtree_ctx - > cached_rbnode = rbnode ;
}
return 0 ;
}
2012-02-23 19:31:04 +00:00
static int regcache_rbtree_sync ( struct regmap * map , unsigned int min ,
unsigned int max )
2011-09-19 14:34:02 +01:00
{
struct regcache_rbtree_ctx * rbtree_ctx ;
struct rb_node * node ;
struct regcache_rbtree_node * rbnode ;
int ret ;
2013-03-29 19:32:28 +00:00
int base , end ;
2011-09-19 14:34:02 +01:00
rbtree_ctx = map - > cache ;
for ( node = rb_first ( & rbtree_ctx - > root ) ; node ; node = rb_next ( node ) ) {
rbnode = rb_entry ( node , struct regcache_rbtree_node , node ) ;
2012-02-23 19:31:04 +00:00
if ( rbnode - > base_reg > max )
break ;
if ( rbnode - > base_reg + rbnode - > blklen < min )
continue ;
2012-03-05 23:28:49 +00:00
if ( min > rbnode - > base_reg )
2012-02-23 19:31:04 +00:00
base = min - rbnode - > base_reg ;
else
base = 0 ;
if ( max < rbnode - > base_reg + rbnode - > blklen )
2013-03-13 16:38:33 +01:00
end = max - rbnode - > base_reg + 1 ;
2012-02-23 19:31:04 +00:00
else
end = rbnode - > blklen ;
2013-03-29 19:32:28 +00:00
ret = regcache_sync_block ( map , rbnode - > block , rbnode - > base_reg ,
base , end ) ;
if ( ret ! = 0 )
return ret ;
2011-09-19 14:34:02 +01:00
}
2013-03-29 19:32:28 +00:00
return regmap_async_complete ( map ) ;
2011-09-19 14:34:02 +01:00
}
/* rbtree cache backend registered with the regcache core. */
struct regcache_ops regcache_rbtree_ops = {
	.type	= REGCACHE_RBTREE,
	.name	= "rbtree",
	.init	= regcache_rbtree_init,
	.exit	= regcache_rbtree_exit,
	.read	= regcache_rbtree_read,
	.write	= regcache_rbtree_write,
	.sync	= regcache_rbtree_sync,
};