/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
2011-10-05 01:05:47 +04:00
# include <linux/bsearch.h>
2014-10-09 13:02:52 +04:00
# include <linux/device.h>
# include <linux/export.h>
# include <linux/slab.h>
2011-10-03 13:50:14 +04:00
# include <linux/sort.h>
2011-09-19 17:34:00 +04:00
2015-03-20 00:50:47 +03:00
# include "trace.h"
2011-09-19 17:34:00 +04:00
# include "internal.h"
static const struct regcache_ops * cache_types [ ] = {
2011-09-19 17:34:02 +04:00
& regcache_rbtree_ops ,
2011-09-19 17:34:03 +04:00
& regcache_lzo_ops ,
2012-12-19 18:51:55 +04:00
& regcache_flat_ops ,
2011-09-19 17:34:00 +04:00
} ;
static int regcache_hw_init ( struct regmap * map )
{
int i , j ;
int ret ;
int count ;
2016-02-02 15:16:51 +03:00
unsigned int reg , val ;
2011-09-19 17:34:00 +04:00
void * tmp_buf ;
if ( ! map - > num_reg_defaults_raw )
return - EINVAL ;
2014-10-09 13:02:57 +04:00
/* calculate the size of reg_defaults */
for ( count = 0 , i = 0 ; i < map - > num_reg_defaults_raw ; i + + )
2016-07-30 00:42:12 +03:00
if ( regmap_readable ( map , i * map - > reg_stride ) & &
! regmap_volatile ( map , i * map - > reg_stride ) )
2014-10-09 13:02:57 +04:00
count + + ;
2016-07-30 00:42:12 +03:00
/* all registers are unreadable or volatile, so just bypass */
2014-10-09 13:02:57 +04:00
if ( ! count ) {
map - > cache_bypass = true ;
return 0 ;
}
map - > num_reg_defaults = count ;
map - > reg_defaults = kmalloc_array ( count , sizeof ( struct reg_default ) ,
GFP_KERNEL ) ;
if ( ! map - > reg_defaults )
return - ENOMEM ;
2011-09-19 17:34:00 +04:00
if ( ! map - > reg_defaults_raw ) {
2015-09-27 01:04:07 +03:00
bool cache_bypass = map - > cache_bypass ;
2011-09-19 17:34:00 +04:00
dev_warn ( map - > dev , " No cache defaults, reading back from HW \n " ) ;
2012-02-17 17:27:26 +04:00
2016-01-14 00:41:12 +03:00
/* Bypass the cache access till data read from HW */
2015-09-27 01:04:07 +03:00
map - > cache_bypass = true ;
2011-09-19 17:34:00 +04:00
tmp_buf = kmalloc ( map - > cache_size_raw , GFP_KERNEL ) ;
2014-10-09 13:02:57 +04:00
if ( ! tmp_buf ) {
ret = - ENOMEM ;
goto err_free ;
}
2013-02-21 22:39:47 +04:00
ret = regmap_raw_read ( map , 0 , tmp_buf ,
2016-01-14 00:41:12 +03:00
map - > cache_size_raw ) ;
2012-02-17 17:27:26 +04:00
map - > cache_bypass = cache_bypass ;
2016-02-02 15:16:51 +03:00
if ( ret = = 0 ) {
map - > reg_defaults_raw = tmp_buf ;
map - > cache_free = 1 ;
} else {
kfree ( tmp_buf ) ;
}
2011-09-19 17:34:00 +04:00
}
/* fill the reg_defaults */
for ( i = 0 , j = 0 ; i < map - > num_reg_defaults_raw ; i + + ) {
2016-02-02 15:16:51 +03:00
reg = i * map - > reg_stride ;
if ( ! regmap_readable ( map , reg ) )
continue ;
if ( regmap_volatile ( map , reg ) )
2011-09-19 17:34:00 +04:00
continue ;
2016-02-02 15:16:51 +03:00
if ( map - > reg_defaults_raw ) {
val = regcache_get_val ( map , map - > reg_defaults_raw , i ) ;
} else {
bool cache_bypass = map - > cache_bypass ;
map - > cache_bypass = true ;
ret = regmap_read ( map , reg , & val ) ;
map - > cache_bypass = cache_bypass ;
if ( ret ! = 0 ) {
dev_err ( map - > dev , " Failed to read %d: %d \n " ,
reg , ret ) ;
goto err_free ;
}
}
map - > reg_defaults [ j ] . reg = reg ;
2011-09-19 17:34:00 +04:00
map - > reg_defaults [ j ] . def = val ;
j + + ;
}
return 0 ;
2011-11-14 13:40:16 +04:00
err_free :
2014-10-09 13:02:57 +04:00
kfree ( map - > reg_defaults ) ;
2011-11-14 13:40:16 +04:00
return ret ;
2011-09-19 17:34:00 +04:00
}
2011-11-16 19:28:16 +04:00
int regcache_init ( struct regmap * map , const struct regmap_config * config )
2011-09-19 17:34:00 +04:00
{
int ret ;
int i ;
void * tmp_buf ;
2011-09-19 19:08:03 +04:00
if ( map - > cache_type = = REGCACHE_NONE ) {
2015-12-11 06:23:19 +03:00
if ( config - > reg_defaults | | config - > num_reg_defaults_raw )
dev_warn ( map - > dev ,
" No cache used with register defaults set! \n " ) ;
2011-09-19 19:08:03 +04:00
map - > cache_bypass = true ;
2011-09-19 17:34:00 +04:00
return 0 ;
2011-09-19 19:08:03 +04:00
}
2011-09-19 17:34:00 +04:00
2015-12-11 06:23:20 +03:00
if ( config - > reg_defaults & & ! config - > num_reg_defaults ) {
dev_err ( map - > dev ,
" Register defaults are set without the number! \n " ) ;
return - EINVAL ;
}
2015-12-11 06:23:19 +03:00
for ( i = 0 ; i < config - > num_reg_defaults ; i + + )
if ( config - > reg_defaults [ i ] . reg % map - > reg_stride )
return - EINVAL ;
2011-09-19 17:34:00 +04:00
for ( i = 0 ; i < ARRAY_SIZE ( cache_types ) ; i + + )
if ( cache_types [ i ] - > type = = map - > cache_type )
break ;
if ( i = = ARRAY_SIZE ( cache_types ) ) {
dev_err ( map - > dev , " Could not match compress type: %d \n " ,
map - > cache_type ) ;
return - EINVAL ;
}
2011-11-16 19:28:16 +04:00
map - > num_reg_defaults = config - > num_reg_defaults ;
map - > num_reg_defaults_raw = config - > num_reg_defaults_raw ;
map - > reg_defaults_raw = config - > reg_defaults_raw ;
2011-11-16 23:34:03 +04:00
map - > cache_word_size = DIV_ROUND_UP ( config - > val_bits , 8 ) ;
map - > cache_size_raw = map - > cache_word_size * config - > num_reg_defaults_raw ;
2011-11-16 19:28:16 +04:00
2011-09-19 17:34:00 +04:00
map - > cache = NULL ;
map - > cache_ops = cache_types [ i ] ;
if ( ! map - > cache_ops - > read | |
! map - > cache_ops - > write | |
! map - > cache_ops - > name )
return - EINVAL ;
/* We still need to ensure that the reg_defaults
* won ' t vanish from under us . We ' ll need to make
* a copy of it .
*/
2011-11-16 19:28:17 +04:00
if ( config - > reg_defaults ) {
tmp_buf = kmemdup ( config - > reg_defaults , map - > num_reg_defaults *
2011-09-19 17:34:00 +04:00
sizeof ( struct reg_default ) , GFP_KERNEL ) ;
if ( ! tmp_buf )
return - ENOMEM ;
map - > reg_defaults = tmp_buf ;
2011-10-09 16:13:58 +04:00
} else if ( map - > num_reg_defaults_raw ) {
2011-09-29 18:24:54 +04:00
/* Some devices such as PMICs don't have cache defaults,
2011-09-19 17:34:00 +04:00
* we cope with this by reading back the HW registers and
* crafting the cache defaults by hand .
*/
ret = regcache_hw_init ( map ) ;
if ( ret < 0 )
return ret ;
2014-10-09 13:02:57 +04:00
if ( map - > cache_bypass )
return 0 ;
2011-09-19 17:34:00 +04:00
}
if ( ! map - > max_register )
map - > max_register = map - > num_reg_defaults_raw ;
if ( map - > cache_ops - > init ) {
dev_dbg ( map - > dev , " Initializing %s cache \n " ,
map - > cache_ops - > name ) ;
2011-11-14 13:40:17 +04:00
ret = map - > cache_ops - > init ( map ) ;
if ( ret )
goto err_free ;
2011-09-19 17:34:00 +04:00
}
return 0 ;
2011-11-14 13:40:17 +04:00
err_free :
kfree ( map - > reg_defaults ) ;
if ( map - > cache_free )
kfree ( map - > reg_defaults_raw ) ;
return ret ;
2011-09-19 17:34:00 +04:00
}
void regcache_exit ( struct regmap * map )
{
if ( map - > cache_type = = REGCACHE_NONE )
return ;
BUG_ON ( ! map - > cache_ops ) ;
kfree ( map - > reg_defaults ) ;
if ( map - > cache_free )
kfree ( map - > reg_defaults_raw ) ;
if ( map - > cache_ops - > exit ) {
dev_dbg ( map - > dev , " Destroying %s cache \n " ,
map - > cache_ops - > name ) ;
map - > cache_ops - > exit ( map ) ;
}
}
/**
2017-01-12 14:17:39 +03:00
* regcache_read - Fetch the value of a given register from the cache .
2011-09-19 17:34:00 +04:00
*
* @ map : map to configure .
* @ reg : The register index .
* @ value : The value to be returned .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_read ( struct regmap * map ,
unsigned int reg , unsigned int * value )
{
2011-11-30 18:27:08 +04:00
int ret ;
2011-09-19 17:34:00 +04:00
if ( map - > cache_type = = REGCACHE_NONE )
return - ENOSYS ;
BUG_ON ( ! map - > cache_ops ) ;
2011-11-30 18:27:08 +04:00
if ( ! regmap_volatile ( map , reg ) ) {
ret = map - > cache_ops - > read ( map , reg , value ) ;
if ( ret = = 0 )
2015-03-09 14:20:13 +03:00
trace_regmap_reg_read_cache ( map , reg , * value ) ;
2011-11-30 18:27:08 +04:00
return ret ;
}
2011-09-19 17:34:00 +04:00
return - EINVAL ;
}
/**
2017-01-12 14:17:39 +03:00
* regcache_write - Set the value of a given register in the cache .
2011-09-19 17:34:00 +04:00
*
* @ map : map to configure .
* @ reg : The register index .
* @ value : The new register value .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_write ( struct regmap * map ,
unsigned int reg , unsigned int value )
{
if ( map - > cache_type = = REGCACHE_NONE )
return 0 ;
BUG_ON ( ! map - > cache_ops ) ;
if ( ! regmap_volatile ( map , reg ) )
return map - > cache_ops - > write ( map , reg , value ) ;
return 0 ;
}
2015-05-06 01:14:13 +03:00
static bool regcache_reg_needs_sync ( struct regmap * map , unsigned int reg ,
unsigned int val )
{
int ret ;
2015-05-06 01:14:14 +03:00
/* If we don't know the chip just got reset, then sync everything. */
if ( ! map - > no_sync_defaults )
return true ;
2015-05-06 01:14:13 +03:00
/* Is this the hardware default? If so skip. */
ret = regcache_lookup_reg ( map , reg ) ;
if ( ret > = 0 & & val = = map - > reg_defaults [ ret ] . def )
return false ;
return true ;
}
2013-06-03 02:15:26 +04:00
static int regcache_default_sync ( struct regmap * map , unsigned int min ,
unsigned int max )
{
unsigned int reg ;
2014-03-19 00:45:08 +04:00
for ( reg = min ; reg < = max ; reg + = map - > reg_stride ) {
2013-06-03 02:15:26 +04:00
unsigned int val ;
int ret ;
2014-03-19 00:45:09 +04:00
if ( regmap_volatile ( map , reg ) | |
! regmap_writeable ( map , reg ) )
2013-06-03 02:15:26 +04:00
continue ;
ret = regcache_read ( map , reg , & val ) ;
if ( ret )
return ret ;
2015-05-06 01:14:13 +03:00
if ( ! regcache_reg_needs_sync ( map , reg , val ) )
2013-06-03 02:15:26 +04:00
continue ;
2015-09-27 01:04:07 +03:00
map - > cache_bypass = true ;
2013-06-03 02:15:26 +04:00
ret = _regmap_write ( map , reg , val ) ;
2015-09-27 01:04:07 +03:00
map - > cache_bypass = false ;
2014-09-16 15:04:14 +04:00
if ( ret ) {
dev_err ( map - > dev , " Unable to sync register %#x. %d \n " ,
reg , ret ) ;
2013-06-03 02:15:26 +04:00
return ret ;
2014-09-16 15:04:14 +04:00
}
2013-06-03 02:15:26 +04:00
dev_dbg ( map - > dev , " Synced register %#x, value %#x \n " , reg , val ) ;
}
return 0 ;
}
2011-09-19 17:34:00 +04:00
/**
2017-01-12 14:17:39 +03:00
* regcache_sync - Sync the register cache with the hardware .
2011-09-19 17:34:00 +04:00
*
* @ map : map to configure .
*
* Any registers that should not be synced should be marked as
* volatile . In general drivers can choose not to use the provided
* syncing functionality if they so require .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_sync ( struct regmap * map )
{
2011-09-27 14:25:06 +04:00
int ret = 0 ;
unsigned int i ;
2011-09-19 17:34:04 +04:00
const char * name ;
2015-09-27 01:04:07 +03:00
bool bypass ;
2011-09-19 17:34:04 +04:00
2013-06-03 02:15:26 +04:00
BUG_ON ( ! map - > cache_ops ) ;
2011-09-19 17:34:00 +04:00
2013-05-23 17:06:15 +04:00
map - > lock ( map - > lock_arg ) ;
2011-09-29 17:36:26 +04:00
/* Remember the initial bypass state */
bypass = map - > cache_bypass ;
2011-09-27 14:25:06 +04:00
dev_dbg ( map - > dev , " Syncing %s cache \n " ,
map - > cache_ops - > name ) ;
name = map - > cache_ops - > name ;
2015-03-09 14:20:13 +03:00
trace_regcache_sync ( map , name , " start " ) ;
2012-01-21 16:01:14 +04:00
2011-10-26 12:34:22 +04:00
if ( ! map - > cache_dirty )
goto out ;
2012-01-26 01:06:33 +04:00
2013-10-11 00:06:32 +04:00
map - > async = true ;
2012-01-21 16:01:14 +04:00
/* Apply any patch first */
2015-09-27 01:04:07 +03:00
map - > cache_bypass = true ;
2012-01-21 16:01:14 +04:00
for ( i = 0 ; i < map - > patch_regs ; i + + ) {
ret = _regmap_write ( map , map - > patch [ i ] . reg , map - > patch [ i ] . def ) ;
if ( ret ! = 0 ) {
dev_err ( map - > dev , " Failed to write %x = %x: %d \n " ,
map - > patch [ i ] . reg , map - > patch [ i ] . def , ret ) ;
goto out ;
}
}
2015-09-27 01:04:07 +03:00
map - > cache_bypass = false ;
2012-01-21 16:01:14 +04:00
2013-06-03 02:15:26 +04:00
if ( map - > cache_ops - > sync )
ret = map - > cache_ops - > sync ( map , 0 , map - > max_register ) ;
else
ret = regcache_default_sync ( map , 0 , map - > max_register ) ;
2011-09-27 14:25:06 +04:00
2012-02-24 02:05:59 +04:00
if ( ret = = 0 )
map - > cache_dirty = false ;
2011-09-27 14:25:06 +04:00
out :
2011-09-29 17:36:26 +04:00
/* Restore the bypass state */
2013-10-11 00:06:32 +04:00
map - > async = false ;
2011-09-29 17:36:26 +04:00
map - > cache_bypass = bypass ;
2015-05-06 01:14:14 +03:00
map - > no_sync_defaults = false ;
2013-05-23 17:06:15 +04:00
map - > unlock ( map - > lock_arg ) ;
2011-09-27 14:25:06 +04:00
2013-10-11 00:06:32 +04:00
regmap_async_complete ( map ) ;
2015-03-09 14:20:13 +03:00
trace_regcache_sync ( map , name , " stop " ) ;
2013-10-11 00:06:32 +04:00
2011-09-27 14:25:06 +04:00
return ret ;
2011-09-19 17:34:00 +04:00
}
EXPORT_SYMBOL_GPL ( regcache_sync ) ;
2012-02-24 00:53:37 +04:00
/**
2017-01-12 14:17:39 +03:00
* regcache_sync_region - Sync part of the register cache with the hardware .
2012-02-24 00:53:37 +04:00
*
* @ map : map to sync .
* @ min : first register to sync
* @ max : last register to sync
*
* Write all non - default register values in the specified region to
* the hardware .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_sync_region ( struct regmap * map , unsigned int min ,
unsigned int max )
{
int ret = 0 ;
const char * name ;
2015-09-27 01:04:07 +03:00
bool bypass ;
2012-02-24 00:53:37 +04:00
2013-06-03 02:15:26 +04:00
BUG_ON ( ! map - > cache_ops ) ;
2012-02-24 00:53:37 +04:00
2013-05-23 17:06:15 +04:00
map - > lock ( map - > lock_arg ) ;
2012-02-24 00:53:37 +04:00
/* Remember the initial bypass state */
bypass = map - > cache_bypass ;
name = map - > cache_ops - > name ;
dev_dbg ( map - > dev , " Syncing %s cache from %d-%d \n " , name , min , max ) ;
2015-03-09 14:20:13 +03:00
trace_regcache_sync ( map , name , " start region " ) ;
2012-02-24 00:53:37 +04:00
if ( ! map - > cache_dirty )
goto out ;
2013-10-11 00:06:32 +04:00
map - > async = true ;
2013-06-03 02:15:26 +04:00
if ( map - > cache_ops - > sync )
ret = map - > cache_ops - > sync ( map , min , max ) ;
else
ret = regcache_default_sync ( map , min , max ) ;
2012-02-24 00:53:37 +04:00
out :
/* Restore the bypass state */
map - > cache_bypass = bypass ;
2013-10-11 00:06:32 +04:00
map - > async = false ;
2015-05-06 01:14:14 +03:00
map - > no_sync_defaults = false ;
2013-05-23 17:06:15 +04:00
map - > unlock ( map - > lock_arg ) ;
2012-02-24 00:53:37 +04:00
2013-10-11 00:06:32 +04:00
regmap_async_complete ( map ) ;
2015-03-09 14:20:13 +03:00
trace_regcache_sync ( map , name , " stop region " ) ;
2013-10-11 00:06:32 +04:00
2012-02-24 00:53:37 +04:00
return ret ;
}
2012-04-03 16:08:53 +04:00
EXPORT_SYMBOL_GPL ( regcache_sync_region ) ;
2012-02-24 00:53:37 +04:00
2013-05-08 16:55:22 +04:00
/**
2017-01-12 14:17:39 +03:00
* regcache_drop_region - Discard part of the register cache
2013-05-08 16:55:22 +04:00
*
* @ map : map to operate on
* @ min : first register to discard
* @ max : last register to discard
*
* Discard part of the register cache .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_drop_region ( struct regmap * map , unsigned int min ,
unsigned int max )
{
int ret = 0 ;
2013-08-29 12:26:34 +04:00
if ( ! map - > cache_ops | | ! map - > cache_ops - > drop )
2013-05-08 16:55:22 +04:00
return - EINVAL ;
2013-05-23 17:06:15 +04:00
map - > lock ( map - > lock_arg ) ;
2013-05-08 16:55:22 +04:00
2015-03-09 14:20:13 +03:00
trace_regcache_drop_region ( map , min , max ) ;
2013-05-08 16:55:22 +04:00
2013-08-29 12:26:34 +04:00
ret = map - > cache_ops - > drop ( map , min , max ) ;
2013-05-08 16:55:22 +04:00
2013-05-23 17:06:15 +04:00
map - > unlock ( map - > lock_arg ) ;
2013-05-08 16:55:22 +04:00
return ret ;
}
EXPORT_SYMBOL_GPL ( regcache_drop_region ) ;
2011-09-19 21:22:14 +04:00
/**
2017-01-12 14:17:39 +03:00
* regcache_cache_only - Put a register map into cache only mode
2011-09-19 21:22:14 +04:00
*
* @ map : map to configure
2017-01-12 14:17:39 +03:00
* @ enable : flag if changes should be written to the hardware
2011-09-19 21:22:14 +04:00
*
* When a register map is marked as cache only writes to the register
* map API will only update the register cache , they will not cause
* any hardware changes . This is useful for allowing portions of
* drivers to act as though the device were functioning as normal when
* it is disabled for power saving reasons .
*/
void regcache_cache_only ( struct regmap * map , bool enable )
{
2013-05-23 17:06:15 +04:00
map - > lock ( map - > lock_arg ) ;
2011-09-29 17:36:28 +04:00
WARN_ON ( map - > cache_bypass & & enable ) ;
2011-09-19 21:22:14 +04:00
map - > cache_only = enable ;
2015-03-09 14:20:13 +03:00
trace_regmap_cache_only ( map , enable ) ;
2013-05-23 17:06:15 +04:00
map - > unlock ( map - > lock_arg ) ;
2011-09-19 21:22:14 +04:00
}
EXPORT_SYMBOL_GPL ( regcache_cache_only ) ;
2011-10-26 12:34:22 +04:00
/**
2017-01-12 14:17:39 +03:00
* regcache_mark_dirty - Indicate that HW registers were reset to default values
2011-10-26 12:34:22 +04:00
*
* @ map : map to mark
*
2015-05-06 01:14:14 +03:00
* Inform regcache that the device has been powered down or reset , so that
* on resume , regcache_sync ( ) knows to write out all non - default values
* stored in the cache .
*
* If this function is not called , regcache_sync ( ) will assume that
* the hardware state still matches the cache state , modulo any writes that
* happened when cache_only was true .
2011-10-26 12:34:22 +04:00
*/
void regcache_mark_dirty ( struct regmap * map )
{
2013-05-23 17:06:15 +04:00
map - > lock ( map - > lock_arg ) ;
2011-10-26 12:34:22 +04:00
map - > cache_dirty = true ;
2015-05-06 01:14:14 +03:00
map - > no_sync_defaults = true ;
2013-05-23 17:06:15 +04:00
map - > unlock ( map - > lock_arg ) ;
2011-10-26 12:34:22 +04:00
}
EXPORT_SYMBOL_GPL ( regcache_mark_dirty ) ;
2011-09-29 17:36:27 +04:00
/**
2017-01-12 14:17:39 +03:00
* regcache_cache_bypass - Put a register map into cache bypass mode
2011-09-29 17:36:27 +04:00
*
* @ map : map to configure
2017-01-12 14:17:39 +03:00
* @ enable : flag if changes should not be written to the cache
2011-09-29 17:36:27 +04:00
*
* When a register map is marked with the cache bypass option , writes
* to the register map API will only update the hardware and not the
* the cache directly . This is useful when syncing the cache back to
* the hardware .
*/
void regcache_cache_bypass ( struct regmap * map , bool enable )
{
2013-05-23 17:06:15 +04:00
map - > lock ( map - > lock_arg ) ;
2011-09-29 17:36:28 +04:00
WARN_ON ( map - > cache_only & & enable ) ;
2011-09-29 17:36:27 +04:00
map - > cache_bypass = enable ;
2015-03-09 14:20:13 +03:00
trace_regmap_cache_bypass ( map , enable ) ;
2013-05-23 17:06:15 +04:00
map - > unlock ( map - > lock_arg ) ;
2011-09-29 17:36:27 +04:00
}
EXPORT_SYMBOL_GPL ( regcache_cache_bypass ) ;
2013-02-21 22:03:13 +04:00
bool regcache_set_val ( struct regmap * map , void * base , unsigned int idx ,
unsigned int val )
2011-09-19 17:34:00 +04:00
{
2013-02-21 22:07:01 +04:00
if ( regcache_get_val ( map , base , idx ) = = val )
return true ;
2013-02-21 22:39:47 +04:00
/* Use device native format if possible */
if ( map - > format . format_val ) {
map - > format . format_val ( base + ( map - > cache_word_size * idx ) ,
val , 0 ) ;
return false ;
}
2013-02-21 22:03:13 +04:00
switch ( map - > cache_word_size ) {
2011-09-19 17:34:00 +04:00
case 1 : {
u8 * cache = base ;
2015-12-09 08:09:06 +03:00
2011-09-19 17:34:00 +04:00
cache [ idx ] = val ;
break ;
}
case 2 : {
u16 * cache = base ;
2015-12-09 08:09:06 +03:00
2011-09-19 17:34:00 +04:00
cache [ idx ] = val ;
break ;
}
2012-02-18 03:58:25 +04:00
case 4 : {
u32 * cache = base ;
2015-12-09 08:09:06 +03:00
2012-02-18 03:58:25 +04:00
cache [ idx ] = val ;
break ;
}
2015-12-09 08:09:07 +03:00
# ifdef CONFIG_64BIT
case 8 : {
u64 * cache = base ;
2012-02-18 03:58:25 +04:00
cache [ idx ] = val ;
break ;
}
2015-12-09 08:09:07 +03:00
# endif
2011-09-19 17:34:00 +04:00
default :
BUG ( ) ;
}
return false ;
}
2013-02-21 22:03:13 +04:00
unsigned int regcache_get_val ( struct regmap * map , const void * base ,
unsigned int idx )
2011-09-19 17:34:00 +04:00
{
if ( ! base )
return - EINVAL ;
2013-02-21 22:39:47 +04:00
/* Use device native format if possible */
if ( map - > format . parse_val )
2013-03-13 23:29:36 +04:00
return map - > format . parse_val ( regcache_get_val_addr ( map , base ,
idx ) ) ;
2013-02-21 22:39:47 +04:00
2013-02-21 22:03:13 +04:00
switch ( map - > cache_word_size ) {
2011-09-19 17:34:00 +04:00
case 1 : {
const u8 * cache = base ;
2015-12-09 08:09:06 +03:00
2011-09-19 17:34:00 +04:00
return cache [ idx ] ;
}
case 2 : {
const u16 * cache = base ;
2015-12-09 08:09:06 +03:00
2011-09-19 17:34:00 +04:00
return cache [ idx ] ;
}
2012-02-18 03:58:25 +04:00
case 4 : {
const u32 * cache = base ;
2015-12-09 08:09:06 +03:00
2012-02-18 03:58:25 +04:00
return cache [ idx ] ;
}
2015-12-09 08:09:07 +03:00
# ifdef CONFIG_64BIT
case 8 : {
const u64 * cache = base ;
2012-02-18 03:58:25 +04:00
return cache [ idx ] ;
}
2015-12-09 08:09:07 +03:00
# endif
2011-09-19 17:34:00 +04:00
default :
BUG ( ) ;
}
/* unreachable */
return - 1 ;
}
2011-10-05 01:05:47 +04:00
static int regcache_default_cmp ( const void * a , const void * b )
2011-10-03 13:50:14 +04:00
{
const struct reg_default * _a = a ;
const struct reg_default * _b = b ;
return _a - > reg - _b - > reg ;
}
2011-10-05 01:05:47 +04:00
int regcache_lookup_reg ( struct regmap * map , unsigned int reg )
{
struct reg_default key ;
struct reg_default * r ;
key . reg = reg ;
key . def = 0 ;
r = bsearch ( & key , map - > reg_defaults , map - > num_reg_defaults ,
sizeof ( struct reg_default ) , regcache_default_cmp ) ;
if ( r )
return r - map - > reg_defaults ;
else
2011-10-09 16:23:31 +04:00
return - ENOENT ;
2011-10-05 01:05:47 +04:00
}
2013-03-29 23:32:28 +04:00
2013-08-29 12:26:34 +04:00
static bool regcache_reg_present ( unsigned long * cache_present , unsigned int idx )
{
if ( ! cache_present )
return true ;
return test_bit ( idx , cache_present ) ;
}
2013-03-30 00:12:21 +04:00
static int regcache_sync_block_single ( struct regmap * map , void * block ,
2013-08-29 12:26:34 +04:00
unsigned long * cache_present ,
2013-03-30 00:12:21 +04:00
unsigned int block_base ,
unsigned int start , unsigned int end )
{
unsigned int i , regtmp , val ;
int ret ;
for ( i = start ; i < end ; i + + ) {
regtmp = block_base + ( i * map - > reg_stride ) ;
2015-03-04 17:29:17 +03:00
if ( ! regcache_reg_present ( cache_present , i ) | |
! regmap_writeable ( map , regtmp ) )
2013-03-30 00:12:21 +04:00
continue ;
val = regcache_get_val ( map , block , i ) ;
2015-05-06 01:14:13 +03:00
if ( ! regcache_reg_needs_sync ( map , regtmp , val ) )
2013-03-30 00:12:21 +04:00
continue ;
2015-09-27 01:04:07 +03:00
map - > cache_bypass = true ;
2013-03-30 00:12:21 +04:00
ret = _regmap_write ( map , regtmp , val ) ;
2015-09-27 01:04:07 +03:00
map - > cache_bypass = false ;
2014-09-16 15:04:14 +04:00
if ( ret ! = 0 ) {
dev_err ( map - > dev , " Unable to sync register %#x. %d \n " ,
regtmp , ret ) ;
2013-03-30 00:12:21 +04:00
return ret ;
2014-09-16 15:04:14 +04:00
}
2013-03-30 00:12:21 +04:00
dev_dbg ( map - > dev , " Synced register %#x, value %#x \n " ,
regtmp , val ) ;
}
return 0 ;
}
2013-03-30 00:50:07 +04:00
static int regcache_sync_block_raw_flush ( struct regmap * map , const void * * data ,
unsigned int base , unsigned int cur )
{
size_t val_bytes = map - > format . val_bytes ;
int ret , count ;
if ( * data = = NULL )
return 0 ;
2014-01-25 03:40:39 +04:00
count = ( cur - base ) / map - > reg_stride ;
2013-03-30 00:50:07 +04:00
2013-04-04 20:40:45 +04:00
dev_dbg ( map - > dev , " Writing %zu bytes for %d registers from 0x%x-0x%x \n " ,
2014-01-25 03:40:39 +04:00
count * val_bytes , count , base , cur - map - > reg_stride ) ;
2013-03-30 00:50:07 +04:00
2015-09-27 01:04:07 +03:00
map - > cache_bypass = true ;
2013-03-30 00:50:07 +04:00
2013-10-09 15:28:52 +04:00
ret = _regmap_raw_write ( map , base , * data , count * val_bytes ) ;
2014-09-16 15:04:14 +04:00
if ( ret )
dev_err ( map - > dev , " Unable to sync registers %#x-%#x. %d \n " ,
base , cur - map - > reg_stride , ret ) ;
2013-03-30 00:50:07 +04:00
2015-09-27 01:04:07 +03:00
map - > cache_bypass = false ;
2013-03-30 00:50:07 +04:00
* data = NULL ;
return ret ;
}
2013-04-04 13:06:18 +04:00
static int regcache_sync_block_raw ( struct regmap * map , void * block ,
2013-08-29 12:26:34 +04:00
unsigned long * cache_present ,
2013-03-30 00:12:21 +04:00
unsigned int block_base , unsigned int start ,
unsigned int end )
2013-03-29 23:32:28 +04:00
{
2013-03-30 00:50:07 +04:00
unsigned int i , val ;
unsigned int regtmp = 0 ;
unsigned int base = 0 ;
const void * data = NULL ;
2013-03-29 23:32:28 +04:00
int ret ;
for ( i = start ; i < end ; i + + ) {
regtmp = block_base + ( i * map - > reg_stride ) ;
2015-03-04 17:29:17 +03:00
if ( ! regcache_reg_present ( cache_present , i ) | |
! regmap_writeable ( map , regtmp ) ) {
2013-03-30 00:50:07 +04:00
ret = regcache_sync_block_raw_flush ( map , & data ,
base , regtmp ) ;
if ( ret ! = 0 )
return ret ;
2013-03-29 23:32:28 +04:00
continue ;
2013-03-30 00:50:07 +04:00
}
2013-03-29 23:32:28 +04:00
val = regcache_get_val ( map , block , i ) ;
2015-05-06 01:14:13 +03:00
if ( ! regcache_reg_needs_sync ( map , regtmp , val ) ) {
2013-03-30 00:50:07 +04:00
ret = regcache_sync_block_raw_flush ( map , & data ,
base , regtmp ) ;
if ( ret ! = 0 )
return ret ;
2013-03-29 23:32:28 +04:00
continue ;
2013-03-30 00:50:07 +04:00
}
2013-03-29 23:32:28 +04:00
2013-03-30 00:50:07 +04:00
if ( ! data ) {
data = regcache_get_val_addr ( map , block , i ) ;
base = regtmp ;
}
2013-03-29 23:32:28 +04:00
}
2013-08-05 13:21:29 +04:00
return regcache_sync_block_raw_flush ( map , & data , base , regtmp +
map - > reg_stride ) ;
2013-03-29 23:32:28 +04:00
}
2013-03-30 00:12:21 +04:00
int regcache_sync_block ( struct regmap * map , void * block ,
2013-08-29 12:26:34 +04:00
unsigned long * cache_present ,
2013-03-30 00:12:21 +04:00
unsigned int block_base , unsigned int start ,
unsigned int end )
{
2015-08-21 11:26:42 +03:00
if ( regmap_can_raw_write ( map ) & & ! map - > use_single_write )
2013-08-29 12:26:34 +04:00
return regcache_sync_block_raw ( map , block , cache_present ,
block_base , start , end ) ;
2013-03-30 00:12:21 +04:00
else
2013-08-29 12:26:34 +04:00
return regcache_sync_block_single ( map , block , cache_present ,
block_base , start , end ) ;
2013-03-30 00:12:21 +04:00
}