/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscore_ops.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/* clock disable operations are not passed on to hardware during boot */
static int allow_disable;

void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].driver_data = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
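
/*
 * Usage sketch (divisors and table size are hypothetical): a CPG driver
 * with four divisors off a fixed-rate parent could build its cpufreq
 * table like this:
 *
 *	static int divisors[] = { 1, 2, 4, 8 };
 *	static struct clk_div_mult_table div_table = {
 *		.divisors	= divisors,
 *		.nr_divisors	= ARRAY_SIZE(divisors),
 *	};
 *	static struct cpufreq_frequency_table freq_table[5];
 *
 *	clk_rate_table_build(clk, freq_table, ARRAY_SIZE(divisors),
 *			     &div_table, NULL);
 *
 * Note the extra slot for the CPUFREQ_TABLE_END terminator; a NULL bitmap
 * treats every position as valid.
 */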

struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};
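
/*
 * Iterate over each candidate position in [r->min, r->max]. The trailing
 * "if (unlikely(freq == 0)) ; else" is the usual statement-macro trick: it
 * binds to the loop body that follows the macro and quietly skips positions
 * for which the iterator callback reports no usable frequency (it returns
 * 0 for invalid entries).
 */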
#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long highest, lowest, freq;
	long rate_best_fit = -ENOENT;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return -ENOSYS;

	return clk_rate_round_helper(&table_round);
}

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}
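
/*
 * A divider clock's .round_rate op can often sit directly on top of this
 * helper. A minimal sketch, assuming a divider supporting ratios 1..64 of
 * its parent:
 *
 *	static long sample_div_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 */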

static long clk_rate_mult_range_iter(unsigned int pos,
				     struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) * pos;
}

long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
			       unsigned int mult_max, unsigned long rate)
{
	struct clk_rate_round_data mult_range_round = {
		.min	= mult_min,
		.max	= mult_max,
		.func	= clk_rate_mult_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&mult_range_round);
}

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	struct cpufreq_frequency_table *pos;

	cpufreq_for_each_valid_entry(pos, freq_table)
		if (pos->frequency == rate)
			return pos - freq_table;

	return -ENOENT;
}

/* Used for clocks that always run at the same rate as their parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}
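
/*
 * A pass-through clock simply hooks this up as its .recalc op, e.g.:
 *
 *	static struct clk_ops follow_ops = {
 *		.recalc	= followparent_recalc,
 *	};
 */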

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(allow_disable && clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
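
/*
 * Enable/disable calls nest: the clock (and its parents) are only touched
 * on the 0 <-> 1 usecount transitions, so consumers must keep the calls
 * balanced. A minimal consumer sketch (the clock name is hypothetical):
 *
 *	struct clk *clk = clk_get(dev, "peripheral_clk");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		... program the hardware ...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */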

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, provided
 * each clock's .recalc op is set correctly, also propagates their rates
 * down the tree. Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			goto out;
		}

		/*
		 * If we're on a child clock and it provides no mapping of
		 * its own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
out:
	clk->mapped_reg = clk->mapping->base;
	clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;

	return 0;
}
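
/*
 * A sketch of how a root clock might describe its register window up front
 * (the address and length here are hypothetical):
 *
 *	static struct clk_mapping cpg_mapping = {
 *		.phys	= 0xffc80000,
 *		.len	= 0x80,
 *	};
 *
 *	static struct clk root_clk = {
 *		.mapping	= &cpg_mapping,
 *	};
 *
 * Child clocks that supply no mapping of their own inherit this one when
 * they are registered.
 */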

static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		goto out;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
out:
	clk->mapped_reg = NULL;
}

int clk_register(struct clk *clk)
{
	int ret;

	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
#endif

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
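
/*
 * CPU support code typically registers its clocks in one pass, e.g. (a
 * sketch; main_clks[] is an assumed array of struct clk pointers):
 *
 *	int i, ret = 0;
 *
 *	for (i = 0; i < ARRAY_SIZE(main_clks); i++)
 *		ret |= clk_register(main_clks[i]);
 */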

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);

	clk_teardown_mapping(clk);

	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max)
{
	struct cpufreq_frequency_table *freq, *best = NULL;
	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
	struct clk *parent = clk_get_parent(clk);

	if (!parent) {
		*parent_freq = 0;
		*best_freq = clk_round_rate(clk, target);
		return abs(target - *best_freq);
	}

	cpufreq_for_each_valid_entry(freq, parent->freq_table) {
		if (unlikely(freq->frequency / target <= div_min - 1)) {
			unsigned long freq_max;

			freq_max = (freq->frequency + div_min / 2) / div_min;

			if (error > target - freq_max) {
				error = target - freq_max;
				best = freq;
				if (best_freq)
					*best_freq = freq_max;
			}

			pr_debug("too low freq %u, error %lu\n", freq->frequency,
				 target - freq_max);

			if (!error)
				break;

			continue;
		}

		if (unlikely(freq->frequency / target >= div_max)) {
			unsigned long freq_min;

			freq_min = (freq->frequency + div_max / 2) / div_max;

			if (error > freq_min - target) {
				error = freq_min - target;
				best = freq;
				if (best_freq)
					*best_freq = freq_min;
			}

			pr_debug("too high freq %u, error %lu\n", freq->frequency,
				 freq_min - target);

			if (!error)
				break;

			continue;
		}

		div = freq->frequency / target;
		freq_high = freq->frequency / div;
		freq_low = freq->frequency / (div + 1);

		if (freq_high - target < error) {
			error = freq_high - target;
			best = freq;
			if (best_freq)
				*best_freq = freq_high;
		}

		if (target - freq_low < error) {
			error = target - freq_low;
			best = freq;
			if (best_freq)
				*best_freq = freq_low;
		}

		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
			 freq->frequency, div, freq_high, div + 1, freq_low,
			 *best_freq, best->frequency);

		if (!error)
			break;
	}

	if (parent_freq)
		*parent_freq = best->frequency;

	return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);
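
/*
 * A sketch of typical use, assuming a consumer that can divide its parent
 * by 4..32 and wants the closest achievable rate to "target" (expressed in
 * the same units as the parent's freq_table entries):
 *
 *	unsigned long best, parent_rate;
 *	long error = clk_round_parent(clk, target, &best, &parent_rate,
 *				      4, 32);
 *
 * On return, best holds the closest achievable rate, parent_rate the
 * parent table entry that produces it, and the return value the remaining
 * error relative to target.
 */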

#ifdef CONFIG_PM
static void clks_core_resume(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->usecount && clkp->ops)) {
			unsigned long rate = clkp->rate;

			if (likely(clkp->ops->set_parent))
				clkp->ops->set_parent(clkp,
					clkp->parent);
			if (likely(clkp->ops->set_rate))
				clkp->ops->set_rate(clkp, rate);
			else if (likely(clkp->ops->recalc))
				clkp->rate = clkp->ops->recalc(clkp);
		}
	}
}

static struct syscore_ops clks_syscore_ops = {
	.resume = clks_core_resume,
};

static int __init clk_syscore_init(void)
{
	register_syscore_ops(&clks_syscore_ops);

	return 0;
}
subsys_initcall(clk_syscore_init);
#endif

static int __init clk_late_init(void)
{
	unsigned long flags;
	struct clk *clk;

	/* disable all clocks with zero use count */
	mutex_lock(&clock_list_sem);
	spin_lock_irqsave(&clock_lock, flags);

	list_for_each_entry(clk, &clock_list, node)
		if (!clk->usecount && clk->ops && clk->ops->disable)
			clk->ops->disable(clk);

	/* from now on allow clock disable operations */
	allow_disable = 1;

	spin_unlock_irqrestore(&clock_lock, flags);
	mutex_unlock(&clock_list_sem);

	return 0;
}
late_initcall(clk_late_init);