// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};
/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
		return scmi_linux_errmap[-errno];
	return -EIO;
}
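/*
 * Worked example (derived from the table above): a platform status of
 * SCMI_ERR_ENTRY (-4) yields scmi_linux_errmap[4], i.e. -ENOENT; any status
 * outside the (SCMI_ERR_MAX, SCMI_SUCCESS) open interval falls back to -EIO.
 */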
/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}
/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to the allocated xfer on success, else an ERR_PTR-encoded
 *	error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}
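/*
 * Example (illustrative): with desc->max_msg == 8, tokens 0..7 are tracked
 * in xfer_alloc_table. A scmi_xfer_get() that finds bit 3 free returns
 * &minfo->xfer_block[3] with xfer->hdr.seq == 3; that same token travels in
 * the message header and is what scmi_handle_response() later uses to look
 * the pending transfer up again.
 */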
/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}
static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}
/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, xfer_id, msg_type);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
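/*
 * For reference, a sketch of the 32-bit message header parsed above,
 * assuming the usual MSG_*_MASK field definitions from common.h:
 *
 *	| 31..28   | 27..18 | 17..10      | 9..8 | 7..0       |
 *	| reserved | token  | protocol id | type | message id |
 *
 * MSG_XTRACT_TOKEN() and MSG_XTRACT_TYPE() extract the token and type
 * fields respectively.
 */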
/**
 * scmi_xfer_put() - Release a transmit message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}
#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	return info->desc->ops->poll_done(cinfo, xfer) ||
	       ktime_after(ktime_get(), stop);
}
/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}
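/*
 * Typical synchronous usage, sketched (scmi_version_get() below is a
 * concrete in-tree caller following exactly this pattern):
 *
 *	ret = scmi_xfer_get_init(handle, msg_id, prot_id, tx_sz, rx_sz, &t);
 *	if (ret)
 *		return ret;
 *	// ...fill t->tx.buf...
 *	ret = scmi_do_xfer(handle, t);
 *	// ...on success, parse t->rx.buf...
 *	scmi_xfer_put(handle, t);
 */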
#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
			       struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = scmi_do_xfer(handle, xfer);
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;

	xfer->async_done = NULL;

	return ret;
}
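/*
 * Sketch of an asynchronous command (illustrative only; the message id and
 * payload here are hypothetical, not a real protocol definition):
 *
 *	ret = scmi_xfer_get_init(handle, HYPOTHETICAL_CMD_ASYNC, prot_id,
 *				 tx_sz, 0, &t);
 *	if (!ret) {
 *		// ...fill t->tx.buf...
 *		// returns once the delayed response arrives, or -ETIMEDOUT
 *		ret = scmi_do_xfer_with_response(handle, t);
 *		scmi_xfer_put(handle, t);
 *	}
 */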
/**
 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}
/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}
/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}
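/*
 * Sketch of balanced usage by a hypothetical client (not in-tree code):
 *
 *	handle = scmi_handle_get(&sdev->dev);
 *	if (!handle)
 *		return -EPROBE_DEFER;
 *	// ...use handle->version, protocol operations, etc...
 *	scmi_handle_put(handle);
 */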
/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *	if NULL was passed, it returns -EINVAL
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);

	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}
static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}
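/*
 * Example (illustrative): on a platform whose devicetree describes a single
 * channel, chan_available() fails for e.g. SCMI_PROTOCOL_PERF, so the
 * idr_alloc fallback above maps that protocol id onto the same cinfo as
 * SCMI_PROTOCOL_BASE and all protocols end up sharing the base channel.
 */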
static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}
#define MAX_SCMI_DEV_PER_PROTOCOL	2
struct scmi_prot_devnames {
	int protocol_id;
	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
};

static struct scmi_prot_devnames devnames[] = {
	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
	{ SCMI_PROTOCOL_SYSTEM, { "syspower" },},
	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
	{ SCMI_PROTOCOL_RESET,  { "reset" },},
};

static inline void
scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
			     int prot_id)
{
	int loop, cnt;

	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
		if (devnames[loop].protocol_id != prot_id)
			continue;

		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
			const char *name = devnames[loop].names[cnt];

			if (name)
				scmi_create_protocol_device(np, info, prot_id,
							    name);
		}
	}
}
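/*
 * Example (illustrative): a child node with reg = <SCMI_PROTOCOL_PERF>
 * produces one "cpufreq" scmi_device from the table above; a client driver
 * then claims it by listing that protocol id and name in its scmi_device_id
 * table.
 */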
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	scmi_notification_exit(&info->handle);

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}
static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);
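/*
 * These attributes appear under the platform device's sysfs directory; the
 * exact path depends on the firmware node name, e.g. (illustrative values):
 *
 *	$ cat /sys/devices/platform/firmware:scmi/firmware_version
 *	0x10000
 */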
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc },
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);
static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");