// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Adjustable divider clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>

/*
 * DOC: basic adjustable divider clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable.  clk->rate = ceiling(parent->rate / divisor)
 * parent - fixed parent.  No clk_set_parent support
 */

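/*
 * Worked example and registration sketch (illustrative only, not part of
 * the original file; the names, register offset and bitfield below are
 * hypothetical).  With the default encoding the hardware value maps to
 * divisor = val + 1, so programming val = 2 on a 100 MHz parent gives
 * clk->rate = DIV_ROUND_UP(100000000, 3) = 33333334 Hz, i.e. the ceiling
 * of parent->rate / divisor described above.  A provider would typically
 * register such a divider like this:
 *
 *	static struct clk_hw *foo_register_div(struct device *dev,
 *					       void __iomem *base)
 *	{
 *		// 4-bit divider field at bits [7:4] of a hypothetical register
 *		return clk_hw_register_divider(dev, "foo_div", "foo_parent", 0,
 *					       base + 0x10, 4, 4, 0, NULL);
 *	}
 */
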
static inline u32 clk_div_readl(struct clk_divider *divider)
{
	if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
		return ioread32be(divider->reg);

	return readl(divider->reg);
}

static inline void clk_div_writel(struct clk_divider *divider, u32 val)
{
	if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
		iowrite32be(val, divider->reg);
	else
		writel(val, divider->reg);
}

static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
				      u8 width)
{
	unsigned int maxdiv = 0, mask = clk_div_mask(width);
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div > maxdiv && clkt->val <= mask)
			maxdiv = clkt->div;
	return maxdiv;
}

static unsigned int _get_table_mindiv(const struct clk_div_table *table)
{
	unsigned int mindiv = UINT_MAX;
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div < mindiv)
			mindiv = clkt->div;
	return mindiv;
}

static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
				unsigned long flags)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return clk_div_mask(width);
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << clk_div_mask(width);
	if (table)
		return _get_table_maxdiv(table, width);
	return clk_div_mask(width) + 1;
}

static unsigned int _get_table_div(const struct clk_div_table *table,
				   unsigned int val)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->val == val)
			return clkt->div;
	return 0;
}

static unsigned int _get_div(const struct clk_div_table *table,
			     unsigned int val, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return val ? val : clk_div_mask(width) + 1;
	if (table)
		return _get_table_div(table, val);
	return val + 1;
}

static unsigned int _get_table_val(const struct clk_div_table *table,
				   unsigned int div)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return clkt->val;
	return 0;
}

static unsigned int _get_val(const struct clk_div_table *table,
			     unsigned int div, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return div;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __ffs(div);
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (div == clk_div_mask(width) + 1) ? 0 : div;
	if (table)
		return _get_table_val(table, div);
	return div - 1;
}

unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
				  unsigned int val,
				  const struct clk_div_table *table,
				  unsigned long flags, unsigned long width)
{
	unsigned int div;

	div = _get_div(table, val, flags, width);
	if (!div) {
		WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
			"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
			clk_hw_get_name(hw));
		return parent_rate;
	}

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
EXPORT_SYMBOL_GPL(divider_recalc_rate);

static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = clk_div_readl(divider) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static bool _is_valid_table_div(const struct clk_div_table *table,
				unsigned int div)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return true;
	return false;
}

static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
			  unsigned long flags)
{
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return is_power_of_2(div);
	if (table)
		return _is_valid_table_div(table, div);
	return true;
}

static int _round_up_table(const struct clk_div_table *table, int div)
{
	const struct clk_div_table *clkt;
	int up = INT_MAX;

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		else if (clkt->div < div)
			continue;

		if ((clkt->div - div) < (up - div))
			up = clkt->div;
	}

	return up;
}

static int _round_down_table(const struct clk_div_table *table, int div)
{
	const struct clk_div_table *clkt;
	int down = _get_table_mindiv(table);

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		else if (clkt->div > div)
			continue;

		if ((div - clkt->div) < (div - down))
			down = clkt->div;
	}

	return down;
}

static int _div_round_up(const struct clk_div_table *table,
			 unsigned long parent_rate, unsigned long rate,
			 unsigned long flags)
{
	int div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		div = __roundup_pow_of_two(div);
	if (table)
		div = _round_up_table(table, div);

	return div;
}

static int _div_round_closest(const struct clk_div_table *table,
			      unsigned long parent_rate, unsigned long rate,
			      unsigned long flags)
{
	int up, down;
	unsigned long up_rate, down_rate;

	up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
	down = parent_rate / rate;

	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
		up = __roundup_pow_of_two(up);
		down = __rounddown_pow_of_two(down);
	} else if (table) {
		up = _round_up_table(table, up);
		down = _round_down_table(table, down);
	}

	up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
	down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);

	return (rate - up_rate) <= (down_rate - rate) ? up : down;
}

static int _div_round(const struct clk_div_table *table,
		      unsigned long parent_rate, unsigned long rate,
		      unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return _div_round_closest(table, parent_rate, rate, flags);

	return _div_round_up(table, parent_rate, rate, flags);
}

static bool _is_best_div(unsigned long rate, unsigned long now,
			 unsigned long best, unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return abs(rate - now) < abs(rate - best);

	return now <= rate && now > best;
}

static int _next_div(const struct clk_div_table *table, int div,
		     unsigned long flags)
{
	div++;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __roundup_pow_of_two(div);
	if (table)
		return _round_up_table(table, div);

	return div;
}

static int clk_divider_bestdiv(struct clk_hw *hw, struct clk_hw *parent,
			       unsigned long rate,
			       unsigned long *best_parent_rate,
			       const struct clk_div_table *table, u8 width,
			       unsigned long flags)
{
	int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	if (!rate)
		rate = 1;

	maxdiv = _get_maxdiv(table, width, flags);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		bestdiv = _div_round(table, parent_rate, rate, flags);
		bestdiv = bestdiv == 0 ? 1 : bestdiv;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = _next_div(table, 0, flags); i <= maxdiv;
	     i = _next_div(table, i, flags)) {
		if (rate * i == parent_rate_saved) {
			/*
			 * It's the most ideal case if the requested rate can be
			 * divided from parent clock without needing to change
			 * parent rate, so return the divider immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		parent_rate = clk_hw_round_rate(parent, rate * i);
		now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
		if (_is_best_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		bestdiv = _get_maxdiv(table, width, flags);
		*best_parent_rate = clk_hw_round_rate(parent, 1);
	}

	return bestdiv;
}

int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
			   const struct clk_div_table *table, u8 width,
			   unsigned long flags)
{
	int div;

	div = clk_divider_bestdiv(hw, req->best_parent_hw, req->rate,
				  &req->best_parent_rate, table, width, flags);
	req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);

	return 0;
}
EXPORT_SYMBOL_GPL(divider_determine_rate);

int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
			      const struct clk_div_table *table, u8 width,
			      unsigned long flags, unsigned int val)
{
	int div;

	div = _get_div(table, val, flags, width);

	/* Even a read-only clock can propagate a rate change */
	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
		if (!req->best_parent_hw)
			return -EINVAL;

		req->best_parent_rate = clk_hw_round_rate(req->best_parent_hw,
							  req->rate * div);
	}

	req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);

	return 0;
}
EXPORT_SYMBOL_GPL(divider_ro_determine_rate);

long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
			       unsigned long rate, unsigned long *prate,
			       const struct clk_div_table *table,
			       u8 width, unsigned long flags)
{
	struct clk_rate_request req;
	int ret;

	clk_hw_init_rate_request(hw, &req, rate);
	req.best_parent_rate = *prate;
	req.best_parent_hw = parent;

	ret = divider_determine_rate(hw, &req, table, width, flags);
	if (ret)
		return ret;

	*prate = req.best_parent_rate;

	return req.rate;
}
EXPORT_SYMBOL_GPL(divider_round_rate_parent);

long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
				  unsigned long rate, unsigned long *prate,
				  const struct clk_div_table *table, u8 width,
				  unsigned long flags, unsigned int val)
{
	struct clk_rate_request req;
	int ret;

	clk_hw_init_rate_request(hw, &req, rate);
	req.best_parent_rate = *prate;
	req.best_parent_hw = parent;

	ret = divider_ro_determine_rate(hw, &req, table, width, flags, val);
	if (ret)
		return ret;

	*prate = req.best_parent_rate;

	return req.rate;
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);

static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = clk_div_readl(divider) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_round_rate(hw, rate, prate, divider->table,
					     divider->width, divider->flags,
					     val);
	}

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int clk_divider_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = clk_div_readl(divider) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_determine_rate(hw, req, divider->table,
						 divider->width,
						 divider->flags, val);
	}

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}

int divider_get_val(unsigned long rate, unsigned long parent_rate,
		    const struct clk_div_table *table, u8 width,
		    unsigned long flags)
{
	unsigned int div, value;

	div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);

	if (!_is_valid_div(table, div, flags))
		return -EINVAL;

	value = _get_val(table, div, flags, width);

	return min_t(unsigned int, value, clk_div_mask(width));
}
EXPORT_SYMBOL_GPL(divider_get_val);

static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int value;
	unsigned long flags = 0;
	u32 val;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = clk_div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = clk_div_readl(divider);
		val &= ~(clk_div_mask(divider->width) << divider->shift);
	}
	val |= (u32)value << divider->shift;
	clk_div_writel(divider, val);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}

const struct clk_ops clk_divider_ops = {
	.recalc_rate = clk_divider_recalc_rate,
	.round_rate = clk_divider_round_rate,
	.determine_rate = clk_divider_determine_rate,
	.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);

const struct clk_ops clk_divider_ro_ops = {
	.recalc_rate = clk_divider_recalc_rate,
	.round_rate = clk_divider_round_rate,
	.determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);

struct clk_hw *__clk_hw_register_divider(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
		const struct clk_div_table *table, spinlock_t *lock)
{
	struct clk_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret;

	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
		if (width + shift > 16) {
			pr_warn("divider value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the divider */
	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else
		init.ops = &clk_divider_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.parent_hws = parent_hw ? &parent_hw : NULL;
	init.parent_data = parent_data;
	if (parent_name || parent_hw || parent_data)
		init.num_parents = 1;
	else
		init.num_parents = 0;

	/* struct clk_divider assignments */
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	/* register the clock */
	hw = &div->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(div);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_divider);

/**
 * clk_register_divider_table - register a table based divider clock with
 * the clock framework
 * @dev: device registering this clock
 * @name: name of this clock
 * @parent_name: name of clock's parent
 * @flags: framework-specific flags
 * @reg: register address to adjust divider
 * @shift: number of bits to shift the bitfield
 * @width: width of the bitfield
 * @clk_divider_flags: divider-specific flags for this clock
 * @table: array of divider/value pairs ending with a div set to 0
 * @lock: shared register lock for this clock
 */
struct clk *clk_register_divider_table(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_divider_flags, const struct clk_div_table *table,
		spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = __clk_hw_register_divider(dev, NULL, name, parent_name, NULL,
			NULL, flags, reg, shift, width, clk_divider_flags,
			table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_divider_table);

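/*
 * Minimal usage sketch for the table-based registration above (illustrative
 * only; the table contents, clock names, register offset and bitfield are
 * hypothetical and not part of this file).  Each entry maps a hardware value
 * to a divisor, and an entry with div set to 0 terminates the table:
 *
 *	static const struct clk_div_table foo_div_table[] = {
 *		{ .val = 0, .div = 2 },
 *		{ .val = 1, .div = 4 },
 *		{ .val = 2, .div = 8 },
 *		{ .div = 0 },
 *	};
 *
 *	clk = clk_register_divider_table(dev, "foo_div", "foo_parent", 0,
 *					 base + 0x20, 0, 2, 0,
 *					 foo_div_table, NULL);
 */
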
void clk_unregister_divider(struct clk *clk)
{
	struct clk_divider *div;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	div = to_clk_divider(hw);

	clk_unregister(clk);
	kfree(div);
}
EXPORT_SYMBOL_GPL(clk_unregister_divider);

/**
 * clk_hw_unregister_divider - unregister a clk divider
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister_divider(struct clk_hw *hw)
{
	struct clk_divider *div;

	div = to_clk_divider(hw);

	clk_hw_unregister(hw);
	kfree(div);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_divider);

static void devm_clk_hw_release_divider(struct device *dev, void *res)
{
	clk_hw_unregister_divider(*(struct clk_hw **)res);
}

struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
		const struct clk_div_table *table, spinlock_t *lock)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_divider, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_divider(dev, np, name, parent_name, parent_hw,
				       parent_data, flags, reg, shift, width,
				       clk_divider_flags, table, lock);

	if (!IS_ERR(hw)) {
		*ptr = hw;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_divider);