// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 BayLibre, SAS.
 * Author: Jerome Brunet <jbrunet@baylibre.com>
 *
 * Sample clock generator divider:
 * This HW divider gates with value 0 but is otherwise a zero based divider:
 *
 * val >= 1
 * divider = val + 1
 *
 * The duty cycle may also be set for the LR clock variant. The duty cycle
 * ratio is:
 *
 * hi = [0 - val]
 * duty_cycle = (1 + hi) / (1 + val)
*/
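
/*
 * Worked example (illustrative register values, not taken from a datasheet):
 * with val = 3 the divider is val + 1 = 4, and programming hi = 1 then gives
 * a duty cycle of (1 + 1) / (1 + 3) = 50%.
 */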

#include <linux/clk-provider.h>
#include <linux/module.h>

#include "clk-regmap.h"
#include "sclk-div.h"

static inline struct meson_sclk_div_data *
meson_sclk_div_data(struct clk_regmap *clk)
{
        return (struct meson_sclk_div_data *)clk->data;
}

static int sclk_div_maxval(struct meson_sclk_div_data *sclk)
{
        return (1 << sclk->div.width) - 1;
}

static int sclk_div_maxdiv(struct meson_sclk_div_data *sclk)
{
        return sclk_div_maxval(sclk) + 1;
}

/* Pick the divider closest to the requested rate, clamped to [2, maxdiv] */
static int sclk_div_getdiv(struct clk_hw *hw, unsigned long rate,
                           unsigned long prate, int maxdiv)
{
        int div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

        return clamp(div, 2, maxdiv);
}

static int sclk_div_bestdiv(struct clk_hw *hw, unsigned long rate,
                            unsigned long *prate,
                            struct meson_sclk_div_data *sclk)
{
        struct clk_hw *parent = clk_hw_get_parent(hw);
        int bestdiv = 0, i;
        unsigned long maxdiv, now, parent_now;
        unsigned long best = 0, best_parent = 0;

        if (!rate)
                rate = 1;

        maxdiv = sclk_div_maxdiv(sclk);

        if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT))
                return sclk_div_getdiv(hw, rate, *prate, maxdiv);

        /*
         * The maximum divider we can use without overflowing
         * unsigned long in rate * i below
         */
        maxdiv = min(ULONG_MAX / rate, maxdiv);

        for (i = 2; i <= maxdiv; i++) {
                /*
                 * It's the most ideal case if the requested rate can be
                 * divided from parent clock without needing to change
                 * parent rate, so return the divider immediately.
                 */
                if (rate * i == *prate)
                        return i;

                parent_now = clk_hw_round_rate(parent, rate * i);
                now = DIV_ROUND_UP_ULL((u64)parent_now, i);

                if (abs(rate - now) < abs(rate - best)) {
                        bestdiv = i;
                        best = now;
                        best_parent = parent_now;
                }
        }

        if (!bestdiv)
                bestdiv = sclk_div_maxdiv(sclk);
        else
                *prate = best_parent;

        return bestdiv;
}

static long sclk_div_round_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long *prate)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
        int div;

        div = sclk_div_bestdiv(hw, rate, prate, sclk);

        return DIV_ROUND_UP_ULL((u64)*prate, div);
}

static void sclk_apply_ratio(struct clk_regmap *clk,
                             struct meson_sclk_div_data *sclk)
{
        unsigned int hi = DIV_ROUND_CLOSEST(sclk->cached_div *
                                            sclk->cached_duty.num,
                                            sclk->cached_duty.den);

        if (hi)
                hi -= 1;

        meson_parm_write(clk->map, &sclk->hi, hi);
}

static int sclk_div_set_duty_cycle(struct clk_hw *hw,
                                   struct clk_duty *duty)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);

        if (MESON_PARM_APPLICABLE(&sclk->hi)) {
                memcpy(&sclk->cached_duty, duty, sizeof(*duty));
                sclk_apply_ratio(clk, sclk);
        }

        return 0;
}

static int sclk_div_get_duty_cycle(struct clk_hw *hw,
                                   struct clk_duty *duty)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
        int hi;

        if (!MESON_PARM_APPLICABLE(&sclk->hi)) {
                /* Report a fixed 50% duty cycle when the hi parm is unused */
                duty->num = 1;
                duty->den = 2;
                return 0;
        }

        hi = meson_parm_read(clk->map, &sclk->hi);
        duty->num = hi + 1;
        duty->den = sclk->cached_div;

        return 0;
}

static void sclk_apply_divider(struct clk_regmap *clk,
                               struct meson_sclk_div_data *sclk)
{
        if (MESON_PARM_APPLICABLE(&sclk->hi))
                sclk_apply_ratio(clk, sclk);

        /* The register holds the divider minus one (see header comment) */
        meson_parm_write(clk->map, &sclk->div, sclk->cached_div - 1);
}

static int sclk_div_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long prate)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
        unsigned long maxdiv = sclk_div_maxdiv(sclk);

        sclk->cached_div = sclk_div_getdiv(hw, rate, prate, maxdiv);

        if (clk_hw_is_enabled(hw))
                sclk_apply_divider(clk, sclk);

        return 0;
}

static unsigned long sclk_div_recalc_rate(struct clk_hw *hw,
                                          unsigned long prate)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);

        return DIV_ROUND_UP_ULL((u64)prate, sclk->cached_div);
}

static int sclk_div_enable(struct clk_hw *hw)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);

        sclk_apply_divider(clk, sclk);

        return 0;
}

static void sclk_div_disable(struct clk_hw *hw)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);

        /* Writing 0 gates the divider */
        meson_parm_write(clk->map, &sclk->div, 0);
}

static int sclk_div_is_enabled(struct clk_hw *hw)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);

        if (meson_parm_read(clk->map, &sclk->div))
                return 1;

        return 0;
}

static int sclk_div_init(struct clk_hw *hw)
{
        struct clk_regmap *clk = to_clk_regmap(hw);
        struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
        unsigned int val;

        val = meson_parm_read(clk->map, &sclk->div);

        /* if the divider is initially disabled, assume max */
        if (!val)
                sclk->cached_div = sclk_div_maxdiv(sclk);
        else
                sclk->cached_div = val + 1;

        sclk_div_get_duty_cycle(hw, &sclk->cached_duty);

        return 0;
}

const struct clk_ops meson_sclk_div_ops = {
        .recalc_rate = sclk_div_recalc_rate,
        .round_rate = sclk_div_round_rate,
        .set_rate = sclk_div_set_rate,
        .enable = sclk_div_enable,
        .disable = sclk_div_disable,
        .is_enabled = sclk_div_is_enabled,
        .get_duty_cycle = sclk_div_get_duty_cycle,
        .set_duty_cycle = sclk_div_set_duty_cycle,
        .init = sclk_div_init,
};
EXPORT_SYMBOL_GPL(meson_sclk_div_ops);

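/*
 * Usage sketch: a controller driver is expected to embed this ops structure
 * in a clk_regmap whose data points at a meson_sclk_div_data describing the
 * divider (and optional hi) register fields. The name and register layout
 * below are purely illustrative, not taken from any actual controller:
 *
 *      static struct clk_regmap sample_sclk_div = {
 *              .data = &(struct meson_sclk_div_data){
 *                      .div = { .reg_off = 0x0, .shift = 0,  .width = 10, },
 *                      .hi  = { .reg_off = 0x0, .shift = 16, .width = 10, },
 *              },
 *              .hw.init = &(struct clk_init_data){
 *                      .name = "sample_sclk_div",
 *                      .ops = &meson_sclk_div_ops,
 *                      .parent_names = (const char *[]){ "sample_parent" },
 *                      .num_parents = 1,
 *                      .flags = CLK_SET_RATE_PARENT,
 *              },
 *      };
 *
 * The controller is then responsible for setting the clk_regmap 'map' field
 * to its regmap before registering the clock.
 */
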
MODULE_DESCRIPTION("Amlogic Sample divider driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");