// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
*/

#include <linux/sort.h>
# include "common.h"
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3 ,
CLOCK_DESCRIBE_RATES = 0x4 ,
CLOCK_RATE_SET = 0x5 ,
CLOCK_RATE_GET = 0x6 ,
CLOCK_CONFIG_SET = 0x7 ,
} ;

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};
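
/*
 * The num_rates_flags word of the response below packs three fields,
 * decoded by the macros that follow: bits[11:0] hold the number of rates
 * returned by this call, bit[12] clear marks a list of discrete rates
 * (set marks a min/max/step triplet), and bits[31:16] hold the number of
 * rates still remaining.
 */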
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[0];
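/* Reassemble a 64-bit rate in Hz from its little-endian halves. */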
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};
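
/*
 * Flags for CLOCK_RATE_SET, as defined by the SCMI specification: ASYNC
 * requests completion via a delayed response, IGNORE_RESP additionally
 * asks the platform not to send one, and the ROUND_* flags select how a
 * rate that cannot be met exactly may be rounded.
 */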
struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};
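
/* Clock protocol private state, cached on the SCMI handle. */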
struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
					      struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_attributes_get(const struct scmi_handle *handle,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
				 sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	scmi_xfer_put(handle, t);
	return ret;
}
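
/* Ascending-order comparator used to sort() the discrete rates list. */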
static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}
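
/*
 * Retrieve the rate description of a clock, issuing CLOCK_DESCRIBE_RATES
 * repeatedly until the platform reports no rates remaining.  Discrete
 * rates accumulate in clk->list and are sorted ascending afterwards; a
 * continuous range is stored as a min/max/step triplet in clk->range.
 */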
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate = NULL;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;

		scmi_reset_rx_to_maxsz(handle, t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete && rate) {
		clk->list.num_rates = tot_rate_cnt;
		/*
		 * rate now points past the last element filled in above,
		 * so sort from the head of the list instead.
		 */
		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
		     rate_cmp_func, NULL);
	}

	clk->rate_discrete = rate_discrete;

err:
	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
				 sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}
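
/*
 * Set a clock rate, using an asynchronous request whenever the platform
 * advertises async command slots and one is free, and falling back to a
 * synchronous transfer otherwise.
 */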
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
			       u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = handle->clk_priv;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = scmi_do_xfer_with_response(handle, t);
	else
		ret = scmi_do_xfer(handle, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, 0);
}

static int scmi_clock_count_get(const struct scmi_handle *handle)
{
	struct clock_info *ci = handle->clk_priv;

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
	struct clock_info *ci = handle->clk_priv;
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static const struct scmi_clk_ops clk_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};
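
/*
 * Discover the protocol version and number of clocks, then cache each
 * clock's attributes and rate description before publishing clk_ops on
 * the handle.
 */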
static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);

	dev_dbg(handle->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(handle, cinfo);

	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(handle, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(handle, clkid, clk);
	}

	cinfo->version = version;
	handle->clk_ops = &clk_ops;
	handle->clk_priv = cinfo;

	return 0;
}

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)