/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 *  Copyright (C) 2010  Magnus Damm
 *  Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#define CPG_CKSTP_BIT	BIT(8)
2012-04-12 19:50:40 +09:00
static unsigned int sh_clk_read ( struct clk * clk )
2010-05-11 13:29:34 +00:00
{
2012-04-11 12:05:50 +09:00
if ( clk - > flags & CLK_ENABLE_REG_8BIT )
2012-04-12 19:50:40 +09:00
return ioread8 ( clk - > mapped_reg ) ;
2012-04-11 12:05:50 +09:00
else if ( clk - > flags & CLK_ENABLE_REG_16BIT )
2012-04-12 19:50:40 +09:00
return ioread16 ( clk - > mapped_reg ) ;
2012-04-11 12:05:50 +09:00
2012-04-12 19:50:40 +09:00
return ioread32 ( clk - > mapped_reg ) ;
2010-05-11 13:29:34 +00:00
}
2012-04-12 19:50:40 +09:00
static void sh_clk_write ( int value , struct clk * clk )
2010-05-11 13:29:34 +00:00
{
2012-04-11 12:05:50 +09:00
if ( clk - > flags & CLK_ENABLE_REG_8BIT )
2012-04-12 19:50:40 +09:00
iowrite8 ( value , clk - > mapped_reg ) ;
2012-04-11 12:05:50 +09:00
else if ( clk - > flags & CLK_ENABLE_REG_16BIT )
2012-04-12 19:50:40 +09:00
iowrite16 ( value , clk - > mapped_reg ) ;
2012-04-11 12:05:50 +09:00
else
2012-04-12 19:50:40 +09:00
iowrite32 ( value , clk - > mapped_reg ) ;
}
static int sh_clk_mstp_enable ( struct clk * clk )
{
sh_clk_write ( sh_clk_read ( clk ) & ~ ( 1 < < clk - > enable_bit ) , clk ) ;
return 0 ;
}
static void sh_clk_mstp_disable ( struct clk * clk )
{
sh_clk_write ( sh_clk_read ( clk ) | ( 1 < < clk - > enable_bit ) , clk ) ;
2010-05-11 13:29:34 +00:00
}
/*
 * Ops for MSTP (module stop) gate clocks: gate control only, the rate
 * simply follows the parent clock.
 */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
2012-04-11 12:05:50 +09:00
int __init sh_clk_mstp_register ( struct clk * clks , int nr )
2010-05-11 13:29:34 +00:00
{
struct clk * clkp ;
int ret = 0 ;
int k ;
for ( k = 0 ; ! ret & & ( k < nr ) ; k + + ) {
clkp = clks + k ;
2012-04-11 12:05:50 +09:00
clkp - > ops = & sh_clk_mstp_clk_ops ;
2010-05-11 13:29:34 +00:00
ret | = clk_register ( clkp ) ;
}
return ret ;
}
/*
 * Div/mult table lookup helpers
 */
/* The per-clock private data holds the division table for div4/div6 clocks. */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}
static inline struct clk_div_mult_table * clk_to_div_mult_table ( struct clk * clk )
{
return clk_to_div_table ( clk ) - > div_mult_table ;
}
/*
 * Common div ops
 */
/* Round @rate to the closest entry in the clock's pre-built frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
static unsigned long sh_clk_div_recalc ( struct clk * clk )
{
struct clk_div_mult_table * table = clk_to_div_mult_table ( clk ) ;
unsigned int idx ;
clk_rate_table_build ( clk , clk - > freq_table , table - > nr_divisors ,
table , clk - > arch_flags ? & clk - > arch_flags : NULL ) ;
idx = ( sh_clk_read ( clk ) > > clk - > enable_bit ) & clk - > div_mask ;
return clk - > freq_table [ idx ] . frequency ;
}
2012-05-25 15:52:10 +09:00
static int sh_clk_div_set_rate ( struct clk * clk , unsigned long rate )
{
struct clk_div_table * dt = clk_to_div_table ( clk ) ;
unsigned long value ;
int idx ;
idx = clk_rate_table_find ( clk , clk - > freq_table , rate ) ;
if ( idx < 0 )
return idx ;
value = sh_clk_read ( clk ) ;
value & = ~ ( clk - > div_mask < < clk - > enable_bit ) ;
value | = ( idx < < clk - > enable_bit ) ;
sh_clk_write ( value , clk ) ;
/* XXX: Should use a post-change notifier */
if ( dt - > kick )
dt - > kick ( clk ) ;
return 0 ;
}
2012-05-25 16:34:48 +09:00
static int sh_clk_div_enable ( struct clk * clk )
{
2012-11-25 22:01:46 -08:00
if ( clk - > div_mask = = SH_CLK_DIV6_MSK ) {
int ret = sh_clk_div_set_rate ( clk , clk - > rate ) ;
if ( ret < 0 )
return ret ;
}
2012-05-25 16:34:48 +09:00
sh_clk_write ( sh_clk_read ( clk ) & ~ CPG_CKSTP_BIT , clk ) ;
return 0 ;
}
static void sh_clk_div_disable ( struct clk * clk )
{
unsigned int val ;
val = sh_clk_read ( clk ) ;
val | = CPG_CKSTP_BIT ;
/*
* div6 clocks require the divisor field to be non - zero or the
* above CKSTP toggle silently fails . Ensure that the divisor
* array is reset to its initial state on disable .
*/
if ( clk - > flags & CLK_MASK_DIV_ON_DISABLE )
val | = clk - > div_mask ;
sh_clk_write ( val , clk ) ;
}
/* Ops for div clocks without a gate: rate control only, no enable/disable. */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
/* Ops for div clocks that can also be gated via the CKSTP bit. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
2012-05-25 16:55:05 +09:00
static int __init sh_clk_init_parent ( struct clk * clk )
{
u32 val ;
if ( clk - > parent )
return 0 ;
if ( ! clk - > parent_table | | ! clk - > parent_num )
return 0 ;
if ( ! clk - > src_width ) {
pr_err ( " sh_clk_init_parent: cannot select parent clock \n " ) ;
return - EINVAL ;
}
val = ( sh_clk_read ( clk ) > > clk - > src_shift ) ;
val & = ( 1 < < clk - > src_width ) - 1 ;
if ( val > = clk - > parent_num ) {
pr_err ( " sh_clk_init_parent: parent table size failed \n " ) ;
return - EINVAL ;
}
clk_reparent ( clk , clk - > parent_table [ val ] ) ;
if ( ! clk - > parent ) {
pr_err ( " sh_clk_init_parent: unable to set parent " ) ;
return - EINVAL ;
}
return 0 ;
}
/*
 * Common registration path for div4/div6 clocks: allocate one shared
 * frequency-table slab (one table per clock, each with room for a
 * terminator entry), attach @ops and @table, then register each clock
 * and resolve its initial parent. Stops at the first failure.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
		struct clk_div_table *table, struct sh_clk_ops *ops)
{
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	void *freq_table;
	int ret = 0;
	int i;

	freq_table_size *= (nr_divs + 1);

	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): freq_table is not freed on a registration failure;
	 * already-registered clocks keep pointers into it, so it cannot
	 * simply be released here.
	 */
	for (i = 0; !ret && (i < nr); i++) {
		struct clk *clkp = clks + i;

		clkp->ops = ops;
		clkp->priv = table;
		clkp->freq_table = freq_table + (i * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
/*
 * div6 support
 */
static int sh_clk_div6_divisors [ 64 ] = {
1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 ,
17 , 18 , 19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 32 ,
33 , 34 , 35 , 36 , 37 , 38 , 39 , 40 , 41 , 42 , 43 , 44 , 45 , 46 , 47 , 48 ,
49 , 50 , 51 , 52 , 53 , 54 , 55 , 56 , 57 , 58 , 59 , 60 , 61 , 62 , 63 , 64
} ;
/* Divisor-only table for div6 clocks; no multiplier entries are used. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
/* Shared division table for all div6 clocks; no kick callback needed. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
2010-07-21 10:13:10 +00:00
static int sh_clk_div6_set_parent ( struct clk * clk , struct clk * parent )
{
2012-05-25 14:59:26 +09:00
struct clk_div_mult_table * table = clk_to_div_mult_table ( clk ) ;
2010-07-21 10:13:10 +00:00
u32 value ;
int ret , i ;
if ( ! clk - > parent_table | | ! clk - > parent_num )
return - EINVAL ;
/* Search the parent */
for ( i = 0 ; i < clk - > parent_num ; i + + )
if ( clk - > parent_table [ i ] = = parent )
break ;
if ( i = = clk - > parent_num )
return - ENODEV ;
ret = clk_reparent ( clk , parent ) ;
if ( ret < 0 )
return ret ;
2012-04-12 19:50:40 +09:00
value = sh_clk_read ( clk ) &
2010-07-21 10:13:10 +00:00
~ ( ( ( 1 < < clk - > src_width ) - 1 ) < < clk - > src_shift ) ;
2012-04-12 19:50:40 +09:00
sh_clk_write ( value | ( i < < clk - > src_shift ) , clk ) ;
2010-07-21 10:13:10 +00:00
/* Rebuild the frequency table */
clk_rate_table_build ( clk , clk - > freq_table , table - > nr_divisors ,
2011-04-14 17:13:53 +09:00
table , NULL ) ;
2010-07-21 10:13:10 +00:00
return 0 ;
}
/* div6 ops variant that additionally supports runtime reparenting. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
/* Register @nr div6 clocks with gating (CKSTP) but no reparent support. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}
/* Register @nr div6 clocks with gating and runtime reparent support. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
/*
 * div4 support
 */
static int sh_clk_div4_set_parent ( struct clk * clk , struct clk * parent )
{
2012-05-25 14:59:26 +09:00
struct clk_div_mult_table * table = clk_to_div_mult_table ( clk ) ;
2010-05-11 13:29:34 +00:00
u32 value ;
int ret ;
/* we really need a better way to determine parent index, but for
* now assume internal parent comes with CLK_ENABLE_ON_INIT set ,
* no CLK_ENABLE_ON_INIT means external clock . . .
*/
if ( parent - > flags & CLK_ENABLE_ON_INIT )
2012-04-12 19:50:40 +09:00
value = sh_clk_read ( clk ) & ~ ( 1 < < 7 ) ;
2010-05-11 13:29:34 +00:00
else
2012-04-12 19:50:40 +09:00
value = sh_clk_read ( clk ) | ( 1 < < 7 ) ;
2010-05-11 13:29:34 +00:00
ret = clk_reparent ( clk , parent ) ;
if ( ret < 0 )
return ret ;
2012-04-12 19:50:40 +09:00
sh_clk_write ( value , clk ) ;
2010-05-11 13:29:34 +00:00
/* Rebiuld the frequency table */
clk_rate_table_build ( clk , clk - > freq_table , table - > nr_divisors ,
table , & clk - > arch_flags ) ;
return 0 ;
}
/* div4 ops variant with gating and runtime reparent support. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
/*
 * Register @nr ungated div4 clocks sharing @table.
 *
 * NOTE(review): a struct clk_div4_table * is passed where the helper takes
 * struct clk_div_table * — presumably the two are aliased in sh_clk.h;
 * confirm against the header.
 */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
/* Register @nr div4 clocks that support gating via CKSTP. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}
/* Register @nr div4 clocks with gating and runtime reparent support. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
/* FSI-DIV */
static unsigned long fsidiv_recalc ( struct clk * clk )
{
u32 value ;
value = __raw_readl ( clk - > mapping - > base ) ;
value > > = 16 ;
if ( value < 2 )
return clk - > parent - > rate ;
return clk - > parent - > rate / value ;
}
static long fsidiv_round_rate ( struct clk * clk , unsigned long rate )
{
return clk_rate_div_range_round ( clk , 1 , 0xffff , rate ) ;
}
/* Writing zero clears both the divider field and the low control bits. */
static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}
static int fsidiv_enable ( struct clk * clk )
{
u32 value ;
value = __raw_readl ( clk - > mapping - > base ) > > 16 ;
if ( value < 2 )
return 0 ;
__raw_writel ( ( value < < 16 ) | 0x3 , clk - > mapping - > base ) ;
return 0 ;
}
static int fsidiv_set_rate ( struct clk * clk , unsigned long rate )
{
int idx ;
idx = ( clk - > parent - > rate / rate ) & 0xffff ;
if ( idx < 2 )
__raw_writel ( 0 , clk - > mapping - > base ) ;
else
__raw_writel ( idx < < 16 , clk - > mapping - > base ) ;
return 0 ;
}
/* Full set of ops for FSI-DIV clocks: rate control plus gating. */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};
/*
 * Register @nr FSI-DIV clocks. Each clock's enable_reg (filled in by
 * SH_CLK_FSIDIV()) carries the physical register address; it is moved
 * into a freshly allocated clk_mapping and cleared. Returns 0 on
 * success or a negative error code, unwinding the current mapping on
 * failure (previously the clk_register() result was silently ignored).
 */
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int ret, i;

	for (i = 0; i < nr; i++) {
		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0; /* remove .enable_reg */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		ret = clk_register(&clks[i]);
		if (ret) {
			clks[i].mapping = NULL;
			kfree(map);
			return ret;
		}
	}

	return 0;
}