// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) Protocol based clock driver
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <asm/div64.h>

#define NOT_ATOMIC	false
#define ATOMIC		true

static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;

struct scmi_clk {
	u32 id;
	struct device *dev;
	struct clk_hw hw;
	const struct scmi_clock_info *info;
	const struct scmi_protocol_handle *ph;
};

#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
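
/*
 * Query the current rate from the SCMI platform firmware; on a
 * communication error report a rate of 0 Hz back to the clock framework.
 */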
static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	int ret;
	u64 rate;
	struct scmi_clk *clk = to_scmi_clk(hw);

	ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
	if (ret)
		return 0;
	return rate;
}
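
/*
 * Worked example for the step rounding below, with illustrative values:
 * for min_rate = 1000000 Hz and step_size = 250000 Hz, a request for
 * 1600000 Hz yields ftmp = 600000 + 249999 = 849999, which do_div()
 * truncates to 3 steps, giving 3 * 250000 + 1000000 = 1750000 Hz, i.e.
 * the requested rate rounded up to the next achievable step.
 */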
static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	u64 fmin, fmax, ftmp;
	struct scmi_clk *clk = to_scmi_clk(hw);

	/*
	 * We can't figure out what rate it will be, so just return the
	 * rate back to the caller. scmi_clk_recalc_rate() will be called
	 * after the rate is set and we'll know what rate the clock is
	 * running at then.
	 */
	if (clk->info->rate_discrete)
		return rate;

	fmin = clk->info->range.min_rate;
	fmax = clk->info->range.max_rate;
	if (rate <= fmin)
		return fmin;
	else if (rate >= fmax)
		return fmax;

	ftmp = rate - fmin;
	ftmp += clk->info->range.step_size - 1; /* to round up */
	do_div(ftmp, clk->info->range.step_size);

	return ftmp * clk->info->range.step_size + fmin;
}

static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
}

static int scmi_clk_enable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
}

static void scmi_clk_disable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
}
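
/*
 * The *_atomic variants below pass ATOMIC to the SCMI clock protocol so
 * that the request is carried out without sleeping; see the comment ahead
 * of scmi_clk_ops further down for when each set of callbacks is used.
 */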
static int scmi_clk_atomic_enable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
}

static void scmi_clk_atomic_disable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
}

static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
{
	int ret;
	bool enabled = false;
	struct scmi_clk *clk = to_scmi_clk(hw);

	ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
	if (ret)
		dev_warn(clk->dev,
			 "Failed to get state for clock ID %d\n", clk->id);

	return !!enabled;
}

/*
 * We can provide enable/disable/is_enabled atomic callbacks only if the
 * underlying SCMI transport for an SCMI instance is configured to handle
 * SCMI commands in an atomic manner.
 *
 * When no SCMI atomic transport support is available we instead provide only
 * the prepare/unprepare API, as allowed by the clock framework when atomic
 * calls are not available.
 *
 * Two distinct sets of clk_ops are provided since we could have multiple SCMI
 * instances with different underlying transport quality, so they cannot be
 * shared.
 */
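
/*
 * From a consumer's perspective: a clock registered with scmi_atomic_clk_ops
 * can be gated and ungated via clk_enable()/clk_disable() from atomic
 * context, whereas one registered with scmi_clk_ops is actually gated only
 * at the sleeping clk_prepare()/clk_unprepare() stage.
 */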
static const struct clk_ops scmi_clk_ops = {
	.recalc_rate = scmi_clk_recalc_rate,
	.round_rate = scmi_clk_round_rate,
	.set_rate = scmi_clk_set_rate,
	.prepare = scmi_clk_enable,
	.unprepare = scmi_clk_disable,
};

static const struct clk_ops scmi_atomic_clk_ops = {
	.recalc_rate = scmi_clk_recalc_rate,
	.round_rate = scmi_clk_round_rate,
	.set_rate = scmi_clk_set_rate,
	.enable = scmi_clk_atomic_enable,
	.disable = scmi_clk_atomic_disable,
	.is_enabled = scmi_clk_atomic_is_enabled,
};
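
/*
 * NOTE: the clk_init_data below is deliberately on the stack; the core
 * clock framework copies what it needs out of it during
 * devm_clk_hw_register(), so it does not have to outlive this function.
 */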
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
			     const struct clk_ops *scmi_ops)
{
	int ret;
	unsigned long min_rate, max_rate;

	struct clk_init_data init = {
		.flags = CLK_GET_RATE_NOCACHE,
		.num_parents = 0,
		.ops = scmi_ops,
		.name = sclk->info->name,
	};

	sclk->hw.init = &init;
	ret = devm_clk_hw_register(dev, &sclk->hw);
	if (ret)
		return ret;

	if (sclk->info->rate_discrete) {
		int num_rates = sclk->info->list.num_rates;

		if (num_rates <= 0)
			return -EINVAL;

		min_rate = sclk->info->list.rates[0];
		max_rate = sclk->info->list.rates[num_rates - 1];
	} else {
		min_rate = sclk->info->range.min_rate;
		max_rate = sclk->info->range.max_rate;
	}

	clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
	return ret;
}
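
/*
 * Probe flow: acquire the SCMI clock protocol, query how many clocks the
 * platform firmware exposes, register a clk_hw for each discovered clock
 * (choosing atomic or sleeping ops per clock) and finally publish them all
 * through a one-cell OF clock provider.
 */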
static int scmi_clocks_probe(struct scmi_device *sdev)
{
	int idx, count, err;
	unsigned int atomic_threshold;
	bool is_atomic;
	struct clk_hw **hws;
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_handle *ph;

	if (!handle)
		return -ENODEV;

	scmi_proto_clk_ops =
		handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(scmi_proto_clk_ops))
		return PTR_ERR(scmi_proto_clk_ops);

	count = scmi_proto_clk_ops->count_get(ph);
	if (count < 0) {
		dev_err(dev, "%pOFn: invalid clock output count\n", np);
		return -EINVAL;
	}
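
	/*
	 * struct_size(clk_data, hws, count) evaluates to
	 * sizeof(*clk_data) + count * sizeof(clk_data->hws[0]) with overflow
	 * checking, sizing the flexible hws[] array in a single allocation.
	 */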
	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = count;
	hws = clk_data->hws;

	is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);

	for (idx = 0; idx < count; idx++) {
		struct scmi_clk *sclk;
		const struct clk_ops *scmi_ops;

		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
		if (!sclk)
			return -ENOMEM;

		sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
		if (!sclk->info) {
			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
			devm_kfree(dev, sclk);
			continue;
		}

		sclk->id = idx;
		sclk->ph = ph;
		sclk->dev = dev;

		/*
		 * Note that when transport is atomic but SCMI protocol did not
		 * specify (or support) an enable_latency associated with a
		 * clock, we default to use atomic operations mode.
		 */
		if (is_atomic &&
		    sclk->info->enable_latency <= atomic_threshold)
			scmi_ops = &scmi_atomic_clk_ops;
		else
			scmi_ops = &scmi_clk_ops;

		err = scmi_clk_ops_init(dev, sclk, scmi_ops);
		if (err) {
			dev_err(dev, "failed to register clock %d\n", idx);
			devm_kfree(dev, sclk);
			hws[idx] = NULL;
		} else {
			dev_dbg(dev, "Registered clock:%s%s\n",
				sclk->info->name,
				scmi_ops == &scmi_atomic_clk_ops ?
				" (atomic ops)" : "");
			hws[idx] = &sclk->hw;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_clocks_driver = {
	.name = "scmi-clocks",
	.probe = scmi_clocks_probe,
	.id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");