/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
# include <linux/slab.h>
2011-05-27 15:12:15 +04:00
# include <linux/export.h>
2012-01-22 20:23:42 +04:00
# include <linux/device.h>
2011-09-19 17:34:00 +04:00
# include <trace/events/regmap.h>
2011-10-05 01:05:47 +04:00
# include <linux/bsearch.h>
2011-10-03 13:50:14 +04:00
# include <linux/sort.h>
2011-09-19 17:34:00 +04:00
# include "internal.h"
/* Available cache implementations; regcache_init() matches one of these
 * against map->cache_type.
 */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
	&regcache_flat_ops,
};
static int regcache_hw_init ( struct regmap * map )
{
int i , j ;
int ret ;
int count ;
unsigned int val ;
void * tmp_buf ;
if ( ! map - > num_reg_defaults_raw )
return - EINVAL ;
if ( ! map - > reg_defaults_raw ) {
2012-02-17 17:27:26 +04:00
u32 cache_bypass = map - > cache_bypass ;
2011-09-19 17:34:00 +04:00
dev_warn ( map - > dev , " No cache defaults, reading back from HW \n " ) ;
2012-02-17 17:27:26 +04:00
/* Bypass the cache access till data read from HW*/
map - > cache_bypass = 1 ;
2011-09-19 17:34:00 +04:00
tmp_buf = kmalloc ( map - > cache_size_raw , GFP_KERNEL ) ;
if ( ! tmp_buf )
return - EINVAL ;
2013-02-21 22:39:47 +04:00
ret = regmap_raw_read ( map , 0 , tmp_buf ,
map - > num_reg_defaults_raw ) ;
2012-02-17 17:27:26 +04:00
map - > cache_bypass = cache_bypass ;
2011-09-19 17:34:00 +04:00
if ( ret < 0 ) {
kfree ( tmp_buf ) ;
return ret ;
}
map - > reg_defaults_raw = tmp_buf ;
map - > cache_free = 1 ;
}
/* calculate the size of reg_defaults */
for ( count = 0 , i = 0 ; i < map - > num_reg_defaults_raw ; i + + ) {
2013-02-21 22:03:13 +04:00
val = regcache_get_val ( map , map - > reg_defaults_raw , i ) ;
2012-04-09 23:40:24 +04:00
if ( regmap_volatile ( map , i * map - > reg_stride ) )
2011-09-19 17:34:00 +04:00
continue ;
count + + ;
}
map - > reg_defaults = kmalloc ( count * sizeof ( struct reg_default ) ,
GFP_KERNEL ) ;
2011-11-14 13:40:16 +04:00
if ( ! map - > reg_defaults ) {
ret = - ENOMEM ;
goto err_free ;
}
2011-09-19 17:34:00 +04:00
/* fill the reg_defaults */
map - > num_reg_defaults = count ;
for ( i = 0 , j = 0 ; i < map - > num_reg_defaults_raw ; i + + ) {
2013-02-21 22:03:13 +04:00
val = regcache_get_val ( map , map - > reg_defaults_raw , i ) ;
2012-04-09 23:40:24 +04:00
if ( regmap_volatile ( map , i * map - > reg_stride ) )
2011-09-19 17:34:00 +04:00
continue ;
2012-04-09 23:40:24 +04:00
map - > reg_defaults [ j ] . reg = i * map - > reg_stride ;
2011-09-19 17:34:00 +04:00
map - > reg_defaults [ j ] . def = val ;
j + + ;
}
return 0 ;
2011-11-14 13:40:16 +04:00
err_free :
if ( map - > cache_free )
kfree ( map - > reg_defaults_raw ) ;
return ret ;
2011-09-19 17:34:00 +04:00
}
2011-11-16 19:28:16 +04:00
/*
 * regcache_init: Set up register caching for a map.
 *
 * @map: map to configure
 * @config: configuration supplied by the driver
 *
 * Selects the cache_ops matching config->cache_type, copies the
 * driver-supplied defaults (or crafts them from the hardware via
 * regcache_hw_init()) and finally calls the cache implementation's
 * init hook.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	/* Every supplied default must be addressable with the stride */
	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	if (map->cache_type == REGCACHE_NONE) {
		/* No caching requested: all accesses go to hardware */
		map->cache_bypass = true;
		return 0;
	}

	/* Find the cache implementation for the requested type */
	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;
	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	/* Raw values are stored in the smallest whole number of bytes */
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	/* read, write and name are mandatory for every implementation */
	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	/* Undo the kmemdup()/regcache_hw_init() allocations above */
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
/*
 * regcache_exit: Release the resources held by the register cache.
 *
 * @map: map whose cache is being torn down
 */
void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	/* reg_defaults is always a copy owned by the cache */
	kfree(map->reg_defaults);
	/* reg_defaults_raw is ours only if regcache_hw_init() read it back */
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}
/**
* regcache_read : Fetch the value of a given register from the cache .
*
* @ map : map to configure .
* @ reg : The register index .
* @ value : The value to be returned .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_read ( struct regmap * map ,
unsigned int reg , unsigned int * value )
{
2011-11-30 18:27:08 +04:00
int ret ;
2011-09-19 17:34:00 +04:00
if ( map - > cache_type = = REGCACHE_NONE )
return - ENOSYS ;
BUG_ON ( ! map - > cache_ops ) ;
2011-11-30 18:27:08 +04:00
if ( ! regmap_volatile ( map , reg ) ) {
ret = map - > cache_ops - > read ( map , reg , value ) ;
if ( ret = = 0 )
trace_regmap_reg_read_cache ( map - > dev , reg , * value ) ;
return ret ;
}
2011-09-19 17:34:00 +04:00
return - EINVAL ;
}
/**
* regcache_write : Set the value of a given register in the cache .
*
* @ map : map to configure .
* @ reg : The register index .
* @ value : The new register value .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_write ( struct regmap * map ,
unsigned int reg , unsigned int value )
{
if ( map - > cache_type = = REGCACHE_NONE )
return 0 ;
BUG_ON ( ! map - > cache_ops ) ;
if ( ! regmap_volatile ( map , reg ) )
return map - > cache_ops - > write ( map , reg , value ) ;
return 0 ;
}
2013-06-03 02:15:26 +04:00
static int regcache_default_sync ( struct regmap * map , unsigned int min ,
unsigned int max )
{
unsigned int reg ;
for ( reg = min ; reg < = max ; reg + + ) {
unsigned int val ;
int ret ;
if ( regmap_volatile ( map , reg ) )
continue ;
ret = regcache_read ( map , reg , & val ) ;
if ( ret )
return ret ;
/* Is this the hardware default? If so skip. */
ret = regcache_lookup_reg ( map , reg ) ;
if ( ret > = 0 & & val = = map - > reg_defaults [ ret ] . def )
continue ;
map - > cache_bypass = 1 ;
ret = _regmap_write ( map , reg , val ) ;
map - > cache_bypass = 0 ;
if ( ret )
return ret ;
dev_dbg ( map - > dev , " Synced register %#x, value %#x \n " , reg , val ) ;
}
return 0 ;
}
2011-09-19 17:34:00 +04:00
/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");

	/* Nothing to do unless a write has landed since the last sync */
	if (!map->cache_dirty)
		goto out;

	/* Let the bus batch writes asynchronously where it supports it */
	map->async = true;

	/* Apply any patch first, with the cache bypassed so the patch
	 * values go straight to the hardware.
	 */
	map->cache_bypass = 1;
	for (i = 0; i < map->patch_regs; i++) {
		/* Patch registers must respect the configured stride */
		if (map->patch[i].reg % map->reg_stride) {
			ret = -EINVAL;
			goto out;
		}
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = 0;

	/* Prefer the cache implementation's optimised sync */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->unlock(map->lock_arg);

	/* Wait for any asynchronous writes queued above to complete */
	regmap_async_complete(map);

	trace_regcache_sync(map->dev, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
2012-02-24 00:53:37 +04:00
/**
* regcache_sync_region : Sync part of the register cache with the hardware .
*
* @ map : map to sync .
* @ min : first register to sync
* @ max : last register to sync
*
* Write all non - default register values in the specified region to
* the hardware .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_sync_region ( struct regmap * map , unsigned int min ,
unsigned int max )
{
int ret = 0 ;
const char * name ;
unsigned int bypass ;
2013-06-03 02:15:26 +04:00
BUG_ON ( ! map - > cache_ops ) ;
2012-02-24 00:53:37 +04:00
2013-05-23 17:06:15 +04:00
map - > lock ( map - > lock_arg ) ;
2012-02-24 00:53:37 +04:00
/* Remember the initial bypass state */
bypass = map - > cache_bypass ;
name = map - > cache_ops - > name ;
dev_dbg ( map - > dev , " Syncing %s cache from %d-%d \n " , name , min , max ) ;
trace_regcache_sync ( map - > dev , name , " start region " ) ;
if ( ! map - > cache_dirty )
goto out ;
2013-10-11 00:06:32 +04:00
map - > async = true ;
2013-06-03 02:15:26 +04:00
if ( map - > cache_ops - > sync )
ret = map - > cache_ops - > sync ( map , min , max ) ;
else
ret = regcache_default_sync ( map , min , max ) ;
2012-02-24 00:53:37 +04:00
out :
/* Restore the bypass state */
map - > cache_bypass = bypass ;
2013-10-11 00:06:32 +04:00
map - > async = false ;
2013-05-23 17:06:15 +04:00
map - > unlock ( map - > lock_arg ) ;
2012-02-24 00:53:37 +04:00
2013-10-11 00:06:32 +04:00
regmap_async_complete ( map ) ;
trace_regcache_sync ( map - > dev , name , " stop region " ) ;
2012-02-24 00:53:37 +04:00
return ret ;
}
2012-04-03 16:08:53 +04:00
EXPORT_SYMBOL_GPL ( regcache_sync_region ) ;
2012-02-24 00:53:37 +04:00
2013-05-08 16:55:22 +04:00
/**
* regcache_drop_region : Discard part of the register cache
*
* @ map : map to operate on
* @ min : first register to discard
* @ max : last register to discard
*
* Discard part of the register cache .
*
* Return a negative value on failure , 0 on success .
*/
int regcache_drop_region ( struct regmap * map , unsigned int min ,
unsigned int max )
{
int ret = 0 ;
2013-08-29 12:26:34 +04:00
if ( ! map - > cache_ops | | ! map - > cache_ops - > drop )
2013-05-08 16:55:22 +04:00
return - EINVAL ;
2013-05-23 17:06:15 +04:00
map - > lock ( map - > lock_arg ) ;
2013-05-08 16:55:22 +04:00
trace_regcache_drop_region ( map - > dev , min , max ) ;
2013-08-29 12:26:34 +04:00
ret = map - > cache_ops - > drop ( map , min , max ) ;
2013-05-08 16:55:22 +04:00
2013-05-23 17:06:15 +04:00
map - > unlock ( map - > lock_arg ) ;
2013-05-08 16:55:22 +04:00
return ret ;
}
EXPORT_SYMBOL_GPL ( regcache_drop_region ) ;
2011-09-19 21:22:14 +04:00
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* Cache-only and cache-bypass are mutually exclusive */
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
2011-10-26 12:34:22 +04:00
/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend.  If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
2011-09-29 17:36:27 +04:00
/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* Cache-bypass and cache-only are mutually exclusive */
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
2013-02-21 22:03:13 +04:00
bool regcache_set_val ( struct regmap * map , void * base , unsigned int idx ,
unsigned int val )
2011-09-19 17:34:00 +04:00
{
2013-02-21 22:07:01 +04:00
if ( regcache_get_val ( map , base , idx ) = = val )
return true ;
2013-02-21 22:39:47 +04:00
/* Use device native format if possible */
if ( map - > format . format_val ) {
map - > format . format_val ( base + ( map - > cache_word_size * idx ) ,
val , 0 ) ;
return false ;
}
2013-02-21 22:03:13 +04:00
switch ( map - > cache_word_size ) {
2011-09-19 17:34:00 +04:00
case 1 : {
u8 * cache = base ;
cache [ idx ] = val ;
break ;
}
case 2 : {
u16 * cache = base ;
cache [ idx ] = val ;
break ;
}
2012-02-18 03:58:25 +04:00
case 4 : {
u32 * cache = base ;
cache [ idx ] = val ;
break ;
}
2011-09-19 17:34:00 +04:00
default :
BUG ( ) ;
}
return false ;
}
2013-02-21 22:03:13 +04:00
unsigned int regcache_get_val ( struct regmap * map , const void * base ,
unsigned int idx )
2011-09-19 17:34:00 +04:00
{
if ( ! base )
return - EINVAL ;
2013-02-21 22:39:47 +04:00
/* Use device native format if possible */
if ( map - > format . parse_val )
2013-03-13 23:29:36 +04:00
return map - > format . parse_val ( regcache_get_val_addr ( map , base ,
idx ) ) ;
2013-02-21 22:39:47 +04:00
2013-02-21 22:03:13 +04:00
switch ( map - > cache_word_size ) {
2011-09-19 17:34:00 +04:00
case 1 : {
const u8 * cache = base ;
return cache [ idx ] ;
}
case 2 : {
const u16 * cache = base ;
return cache [ idx ] ;
}
2012-02-18 03:58:25 +04:00
case 4 : {
const u32 * cache = base ;
return cache [ idx ] ;
}
2011-09-19 17:34:00 +04:00
default :
BUG ( ) ;
}
/* unreachable */
return - 1 ;
}
2011-10-05 01:05:47 +04:00
static int regcache_default_cmp ( const void * a , const void * b )
2011-10-03 13:50:14 +04:00
{
const struct reg_default * _a = a ;
const struct reg_default * _b = b ;
return _a - > reg - _b - > reg ;
}
2011-10-05 01:05:47 +04:00
int regcache_lookup_reg ( struct regmap * map , unsigned int reg )
{
struct reg_default key ;
struct reg_default * r ;
key . reg = reg ;
key . def = 0 ;
r = bsearch ( & key , map - > reg_defaults , map - > num_reg_defaults ,
sizeof ( struct reg_default ) , regcache_default_cmp ) ;
if ( r )
return r - map - > reg_defaults ;
else
2011-10-09 16:23:31 +04:00
return - ENOENT ;
2011-10-05 01:05:47 +04:00
}
2013-03-29 23:32:28 +04:00
2013-08-29 12:26:34 +04:00
/*
 * regcache_reg_present: Test whether index @idx is populated in the cache.
 * A missing bitmap means every register is treated as present.
 */
static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	return cache_present ? test_bit(idx, cache_present) : true;
}
2013-03-30 00:12:21 +04:00
static int regcache_sync_block_single ( struct regmap * map , void * block ,
2013-08-29 12:26:34 +04:00
unsigned long * cache_present ,
2013-03-30 00:12:21 +04:00
unsigned int block_base ,
unsigned int start , unsigned int end )
{
unsigned int i , regtmp , val ;
int ret ;
for ( i = start ; i < end ; i + + ) {
regtmp = block_base + ( i * map - > reg_stride ) ;
2013-08-29 12:26:34 +04:00
if ( ! regcache_reg_present ( cache_present , i ) )
2013-03-30 00:12:21 +04:00
continue ;
val = regcache_get_val ( map , block , i ) ;
/* Is this the hardware default? If so skip. */
ret = regcache_lookup_reg ( map , regtmp ) ;
if ( ret > = 0 & & val = = map - > reg_defaults [ ret ] . def )
continue ;
map - > cache_bypass = 1 ;
ret = _regmap_write ( map , regtmp , val ) ;
map - > cache_bypass = 0 ;
if ( ret ! = 0 )
return ret ;
dev_dbg ( map - > dev , " Synced register %#x, value %#x \n " ,
regtmp , val ) ;
}
return 0 ;
}
2013-03-30 00:50:07 +04:00
/*
 * regcache_sync_block_raw_flush: Write out a pending run of raw values.
 *
 * @map: map being synced
 * @data: in/out pointer to the start of the pending raw data; NULL when
 *        nothing is pending, and reset to NULL after the flush
 * @base: register address of the first pending value
 * @cur: register address one stride past the last pending value
 *
 * Return a negative value on failure, 0 on success (including no-op).
 */
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	/* cur is exclusive, so this is the number of pending registers */
	count = cur - base;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - 1);

	/* The data comes from the cache, so write it straight to hardware */
	map->cache_bypass = 1;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes);

	map->cache_bypass = 0;

	*data = NULL;

	return ret;
}
2013-04-04 13:06:18 +04:00
/*
 * regcache_sync_block_raw: Sync one cache block using batched raw writes.
 *
 * Scans the block accumulating runs of adjacent non-default values and
 * flushes each run with a single raw write; a run is broken by a register
 * that is absent from the cache or still at its hardware default.
 */
static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* A gap in the cache ends the current run */
		if (!regcache_reg_present(cache_present, i)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip (ends the run). */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* First value of a new run: remember where it starts */
		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/* Flush whatever run is still pending at the end of the block */
	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}
2013-03-30 00:12:21 +04:00
/*
 * regcache_sync_block: Sync one cache block, batching writes when the
 * bus supports raw block writes and falling back to per-register
 * writes otherwise.
 */
int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (!regmap_can_raw_write(map))
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);

	return regcache_sync_block_raw(map, block, cache_present,
				       block_base, start, end);
}