// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */
# include <linux/bitmap.h>
2021-03-16 12:48:28 +00:00
# include <linux/device.h>
2017-03-28 11:36:07 +01:00
# include <linux/export.h>
2021-03-16 12:48:26 +00:00
# include <linux/idr.h>
2017-03-28 11:36:07 +01:00
# include <linux/io.h>
# include <linux/kernel.h>
2017-07-20 14:39:57 +01:00
# include <linux/ktime.h>
2017-03-28 11:36:07 +01:00
# include <linux/module.h>
# include <linux/of_address.h>
# include <linux/of_device.h>
2017-07-20 14:39:57 +01:00
# include <linux/processor.h>
2021-03-16 12:48:26 +00:00
# include <linux/refcount.h>
2017-03-28 11:36:07 +01:00
# include <linux/slab.h>
# include "common.h"
2020-07-01 16:53:43 +01:00
# include "notify.h"
2017-03-28 11:36:07 +01:00
2019-12-17 13:43:45 +00:00
# define CREATE_TRACE_POINTS
# include <trace/events/scmi.h>
2017-03-28 11:36:07 +01:00
enum scmi_error_codes {
SCMI_SUCCESS = 0 , /* Success */
SCMI_ERR_SUPPORT = - 1 , /* Not supported */
SCMI_ERR_PARAMS = - 2 , /* Invalid Parameters */
SCMI_ERR_ACCESS = - 3 , /* Invalid access/permission denied */
SCMI_ERR_ENTRY = - 4 , /* Not found */
SCMI_ERR_RANGE = - 5 , /* Value out of range */
SCMI_ERR_BUSY = - 6 , /* Device busy */
SCMI_ERR_COMMS = - 7 , /* Communication Error */
SCMI_ERR_GENERIC = - 8 , /* Generic Error */
SCMI_ERR_HARDWARE = - 9 , /* Hardware Error */
SCMI_ERR_PROTOCOL = - 10 , /* Protocol Error */
SCMI_ERR_MAX
} ;
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
2017-03-28 11:36:07 +01:00
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};
2021-03-16 12:48:26 +00:00
/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle	*handle;
	const struct scmi_protocol	*proto;
	void				*gid;
	refcount_t			users;
	void				*priv;
	struct scmi_protocol_handle	ph;
};

/* Map an embedded protocol handle back to its enclosing instance */
#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
2017-03-28 11:36:07 +01:00
/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	this SCMI instance: populated on protocol's first attempted
 *	usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

/* Map an embedded scmi_handle back to its enclosing scmi_info */
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
/*
 * Map of SCMI status codes to Linux errnos, indexed by the negated SCMI
 * error code: entries must stay in the exact order of enum scmi_error_codes.
 */
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

/**
 * scmi_to_linux_errno() - Convert an SCMI status code to a Linux errno
 *
 * @errno: SCMI status code as reported by the platform firmware
 *
 * Return: 0 for SCMI_SUCCESS is never reached here since callers check
 *	   status first; a negative Linux errno for any known SCMI error
 *	   code, or -EIO if @errno falls outside the known range.
 */
static inline int scmi_to_linux_errno(int errno)
{
	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
		return scmi_linux_errmap[-errno];
	return -EIO;
}
/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 *
 * Emits the decoded header fields via dev_dbg(); a no-op unless dynamic
 * debug (or DEBUG) is enabled for this device.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}
/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to a reserved &struct scmi_xfer on success, or
 *	   ERR_PTR(-ENOMEM) when no free message slot is available.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* The bitmap index doubles as the message sequence identifier */
	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);
	/* Unique id used to correlate xfer begin/end trace events */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}
/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
2020-03-27 14:34:29 +00:00
/*
 * scmi_handle_notification() - Process an incoming notification message.
 *
 * @cinfo: SCMI Rx channel the notification arrived on
 * @msg_hdr: raw message header as read from the shared memory
 *
 * Grabs a free xfer slot from the Rx pool, fetches the notification
 * payload, dispatches it to the notification core and releases both the
 * slot and the channel. Runs in the Rx path (IRQ context per
 * scmi_rx_callback's note).
 */
static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	/* Timestamp taken as early as possible to mark notification arrival */
	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		/* Channel must be cleared anyway so the platform can proceed */
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	/* Hand the payload off to the notification core with its timestamp */
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}
/*
 * scmi_handle_response() - Process a (possibly delayed) command response.
 *
 * @cinfo: SCMI Tx channel the response arrived on
 * @xfer_id: sequence number extracted from the message header
 * @msg_type: MSG_TYPE_COMMAND or MSG_TYPE_DELAYED_RESP
 *
 * Matches the response against the pending xfer identified by @xfer_id and
 * completes the appropriate waiter. Unexpected messages are dropped and the
 * channel is cleared so the platform is not left blocked.
 */
static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		/* Delayed responses own the channel until cleared here */
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}
2020-03-27 14:34:29 +00:00
/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, xfer_id, msg_type);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
2021-03-16 12:48:33 +00:00
/*
 * Transient code wrapper to ease API migration: look up the protocol
 * handle for @prot_id on @handle, or NULL if that protocol has not been
 * initialized on this SCMI instance.
 */
const struct scmi_protocol_handle *
scmi_map_protocol_handle(const struct scmi_handle *handle, u8 prot_id)
{
	const struct scmi_protocol_handle *ph = NULL;
	const struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, prot_id);
	if (pi)
		ph = &pi->ph;
	mutex_unlock(&info->protocols_mtx);

	return ph;
}
/* Transient code wrapper to ease API migration */
struct scmi_handle * scmi_map_scmi_handle ( const struct scmi_protocol_handle * ph )
{
const struct scmi_protocol_instance * pi = ph_to_pi ( ph ) ;
return ( struct scmi_handle * ) pi - > handle ;
}
2019-07-08 09:40:48 +01:00
/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * Returns the slot to the Tx pool of the SCMI instance owning @ph.
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}
2021-03-16 12:48:33 +00:00
void scmi_xfer_put ( const struct scmi_handle * h , struct scmi_xfer * xfer )
{
const struct scmi_protocol_handle * ph ;
ph = scmi_map_protocol_handle ( h , xfer - > hdr . protocol_id ) ;
if ( ! ph )
return ;
return xfer_put ( ph , xfer ) ;
}
2017-07-20 14:39:57 +01:00
/* Upper bound for busy-polling a completion before declaring timeout */
#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

/*
 * scmi_xfer_done_no_timeout() - Poll-mode completion predicate.
 *
 * @cinfo: channel the transfer was sent on
 * @xfer: transfer being polled
 * @stop: absolute deadline after which polling must give up
 *
 * Returns true when the transport reports the transfer done OR the
 * deadline has passed; callers must re-check the time to tell which.
 */
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	return info->desc->ops->poll_done(cinfo, xfer) ||
	       ktime_after(ktime_get(), stop);
}
2017-03-28 11:36:07 +01:00
/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/*
	 * Re-instate protocol id here from protocol handle so that cannot be
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure.
	 */
	xfer->hdr.protocol_id = pi->proto->id;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		/* Busy-poll until done or the deadline elapses */
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		/* Only fetch the response if we finished before the deadline */
		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	/* Map any SCMI status carried in the reply onto a Linux errno */
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}
2021-03-16 12:48:33 +00:00
int scmi_do_xfer ( const struct scmi_handle * h , struct scmi_xfer * xfer )
{
const struct scmi_protocol_handle * ph ;
ph = scmi_map_protocol_handle ( h , xfer - > hdr . protocol_id ) ;
if ( ! ph )
return - EINVAL ;
return do_xfer ( ph , xfer ) ;
}
/*
 * reset_rx_to_maxsz() - Restore @xfer's Rx buffer length to the transport's
 * maximum message size, presumably so a reused transfer does not retain the
 * shorter length of a previous reply (NOTE(review): intent inferred from
 * callers — confirm).
 */
static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}
2020-10-12 14:26:24 +01:00
void scmi_reset_rx_to_maxsz ( const struct scmi_handle * handle ,
struct scmi_xfer * xfer )
{
2021-03-16 12:48:33 +00:00
const struct scmi_protocol_handle * ph ;
2020-10-12 14:26:24 +01:00
2021-03-16 12:48:33 +00:00
ph = scmi_map_protocol_handle ( handle , xfer - > hdr . protocol_id ) ;
if ( ! ph )
return ;
return reset_rx_to_maxsz ( ph , xfer ) ;
2020-10-12 14:26:24 +01:00
}
2019-07-08 09:40:54 +01:00
/* How long to wait for the asynchronous delayed response */
#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	DECLARE_COMPLETION_ONSTACK(async_response);

	/* Pin the protocol id so the transport routes the message correctly */
	xfer->hdr.protocol_id = pi->proto->id;

	/* A non-NULL async_done marks this xfer as expecting a delayed resp */
	xfer->async_done = &async_response;

	ret = do_xfer(ph, xfer);
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;

	/* Must be reset before the on-stack completion goes out of scope */
	xfer->async_done = NULL;
	return ret;
}
2021-03-16 12:48:33 +00:00
int scmi_do_xfer_with_response ( const struct scmi_handle * h ,
struct scmi_xfer * xfer )
{
const struct scmi_protocol_handle * ph ;
ph = scmi_map_protocol_handle ( h , xfer - > hdr . protocol_id ) ;
if ( ! ph )
return - EINVAL ;
return do_xfer_with_response ( ph , xfer ) ;
}
2017-03-28 11:36:07 +01:00
/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialise the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	/* An rx_size of 0 means "expect up to the transport maximum" */
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	/* Protocol id always comes from the handle, never from the caller */
	xfer->hdr.protocol_id = pi->proto->id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}
2021-03-16 12:48:33 +00:00
/* Legacy handle-based wrapper around xfer_get_init() */
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	const struct scmi_protocol_handle *ph =
		scmi_map_protocol_handle(h, prot_id);

	return ph ? xfer_get_init(ph, msg_id, tx_size, rx_size, p) : -EINVAL;
}
2017-06-06 11:16:15 +01:00
/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Issues a PROTOCOL_VERSION command and decodes the little-endian reply.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}
2021-03-16 12:48:33 +00:00
/* Legacy handle-based wrapper around version_get() */
int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version)
{
	const struct scmi_protocol_handle *ph =
		scmi_map_protocol_handle(h, protocol);

	return ph ? version_get(ph, version) : -EINVAL;
}
2021-03-16 12:48:27 +00:00
/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}
/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}
2021-03-16 12:48:33 +00:00
/* Transfer operations exposed to protocol code via the protocol handle */
static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};
2021-03-16 12:48:34 +00:00
/**
 * scmi_revision_area_get - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 * instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->handle->version;
}
2021-03-16 12:48:26 +00:00
/**
 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resources management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *	   or ERR_PTR on failure.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
				  const struct scmi_protocol *proto)
{
	int ret = -ENOMEM;
	void *gid;
	struct scmi_protocol_instance *pi;
	struct scmi_handle *handle = &info->handle;

	/* Protocol specific devres group */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid)
		goto out;

	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		goto clean;

	pi->gid = gid;
	pi->proto = proto;
	pi->handle = handle;
	pi->ph.dev = handle->dev;
	pi->ph.xops = &xfer_ops;
	pi->ph.set_priv = scmi_set_protocol_priv;
	pi->ph.get_priv = scmi_get_protocol_priv;
	refcount_set(&pi->users, 1);
	/* proto->init is assured NON NULL by scmi_protocol_register */
	ret = pi->proto->instance_init(&pi->ph);
	if (ret)
		goto clean;

	/* Publish the instance; slot [proto->id] must have been free */
	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
			GFP_KERNEL);
	if (ret != proto->id)
		goto clean;

	/*
	 * Warn but ignore events registration errors since we do not want
	 * to skip whole protocols if their notifications are messed up.
	 */
	if (pi->proto->events) {
		ret = scmi_register_protocol_events(handle, pi->proto->id,
						    &pi->ph,
						    pi->proto->events);
		if (ret)
			dev_warn(handle->dev,
				 "Protocol:%X - Events Registration Failed - err:%d\n",
				 pi->proto->id, ret);
	}

	devres_close_group(handle->dev, pi->gid);
	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

	return pi;

clean:
	/* Releases the whole group, including pi itself */
	devres_release_group(handle->dev, gid);
out:
	return ERR_PTR(ret);
}
/**
 * scmi_get_protocol_instance - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);

	if (pi) {
		/* Already initialized: just account for the new user */
		refcount_inc(&pi->users);
	} else {
		const struct scmi_protocol *proto;

		/* Fails if protocol not registered on bus */
		proto = scmi_protocol_get(protocol_id);
		if (proto)
			pi = scmi_alloc_init_protocol_instance(info, proto);
		else
			pi = ERR_PTR(-ENODEV);
	}
	mutex_unlock(&info->protocols_mtx);

	return pi;
}
/**
 * scmi_protocol_acquire - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(struct scmi_handle *handle, u8 protocol_id)
{
	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}
/**
 * scmi_protocol_release - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and triggers de-initialization
 * and resources de-allocation once the last user has gone.
 */
void scmi_protocol_release(struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		/* pi itself lives in the devres group: save gid before free */
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		if (pi->proto->instance_deinit)
			pi->proto->instance_deinit(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		/* Frees pi and every per-protocol devres allocation */
		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}
2021-03-16 12:48:35 +00:00
void scmi_setup_protocol_implemented ( const struct scmi_protocol_handle * ph ,
2017-06-06 11:16:15 +01:00
u8 * prot_imp )
{
2021-03-16 12:48:35 +00:00
const struct scmi_protocol_instance * pi = ph_to_pi ( ph ) ;
struct scmi_info * info = handle_to_scmi_info ( pi - > handle ) ;
2017-06-06 11:16:15 +01:00
info - > protocols_imp = prot_imp ;
}
2017-06-06 11:39:08 +01:00
/* True if @prot_id appears in the platform's implemented-protocols list. */
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	const u8 *imp = info->protocols_imp;
	int i;

	if (!imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++) {
		if (imp[i] == prot_id)
			return true;
	}

	return false;
}
2021-03-16 12:48:28 +00:00
/* Devres payload tracking one protocol hold taken on behalf of a device. */
struct scmi_protocol_devres {
	/* SCMI platform instance the hold was taken on */
	struct scmi_handle *handle;
	/* Protocol the hold refers to */
	u8 protocol_id;
};

/* Devres release hook: drops the protocol hold when the device unbinds. */
static void scmi_devm_release_protocol(struct device *dev, void *res)
{
	struct scmi_protocol_devres *dres = res;

	scmi_protocol_release(dres->handle, dres->protocol_id);
}
/**
 * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, eventually triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
		       struct scmi_protocol_handle **ph)
{
	struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_devres *dres;
	struct scmi_protocol_instance *pi;

	if (!ph)
		return ERR_PTR(-EINVAL);

	/* Allocate the tracking resource first: cheaper to undo on failure */
	dres = devres_alloc(scmi_devm_release_protocol,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return ERR_PTR(-ENOMEM);

	pi = scmi_get_protocol_instance(handle, protocol_id);
	if (IS_ERR(pi))
		goto out_free;

	dres->handle = handle;
	dres->protocol_id = protocol_id;
	devres_add(&sdev->dev, dres);

	*ph = &pi->ph;

	return pi->proto->ops;

out_free:
	devres_free(dres);
	return pi;
}
static int scmi_devm_protocol_match ( struct device * dev , void * res , void * data )
{
struct scmi_protocol_devres * dres = res ;
if ( WARN_ON ( ! dres | | ! data ) )
return 0 ;
return dres - > protocol_id = = * ( ( u8 * ) data ) ;
}
/**
 * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained calling the above
 * @scmi_devm_protocol_get.
 */
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
	/* Fails (and warns) only if no matching devres hold exists */
	WARN_ON(devres_release(&sdev->dev, scmi_devm_release_protocol,
			       scmi_devm_protocol_match, &protocol_id));
}
2017-03-28 11:36:07 +01:00
/**
2018-05-09 17:52:06 +01:00
* scmi_handle_get ( ) - Get the SCMI handle for a device
2017-03-28 11:36:07 +01:00
*
* @ dev : pointer to device for which we want SCMI handle
*
* NOTE : The function does not track individual clients of the framework
2018-05-09 17:52:06 +01:00
* and is expected to be maintained by caller of SCMI protocol library .
2017-03-28 11:36:07 +01:00
* scmi_handle_put must be balanced with successful scmi_handle_get
*
* Return : pointer to handle if successful , NULL on error
*/
struct scmi_handle * scmi_handle_get ( struct device * dev )
{
struct list_head * p ;
struct scmi_info * info ;
struct scmi_handle * handle = NULL ;
mutex_lock ( & scmi_list_mutex ) ;
list_for_each ( p , & scmi_list ) {
info = list_entry ( p , struct scmi_info , node ) ;
if ( dev - > parent = = info - > dev ) {
handle = & info - > handle ;
info - > users + + ;
break ;
}
}
mutex_unlock ( & scmi_list_mutex ) ;
return handle ;
}
/**
* scmi_handle_put ( ) - Release the handle acquired by scmi_handle_get
*
* @ handle : handle acquired by scmi_handle_get
*
* NOTE : The function does not track individual clients of the framework
2018-05-09 17:52:06 +01:00
* and is expected to be maintained by caller of SCMI protocol library .
2017-03-28 11:36:07 +01:00
* scmi_handle_put must be balanced with successful scmi_handle_get
*
* Return : 0 is successfully released
* if null was passed , it returns - EINVAL ;
*/
int scmi_handle_put ( const struct scmi_handle * handle )
{
struct scmi_info * info ;
if ( ! handle )
return - EINVAL ;
info = handle_to_scmi_info ( handle ) ;
mutex_lock ( & scmi_list_mutex ) ;
if ( ! WARN_ON ( ! info - > users ) )
info - > users - - ;
mutex_unlock ( & scmi_list_mutex ) ;
return 0 ;
}
2020-03-27 14:34:26 +00:00
static int __scmi_xfer_info_init ( struct scmi_info * sinfo ,
struct scmi_xfers_info * info )
2017-03-28 11:36:07 +01:00
{
int i ;
struct scmi_xfer * xfer ;
struct device * dev = sinfo - > dev ;
const struct scmi_desc * desc = sinfo - > desc ;
/* Pre-allocated messages, no more than what hdr.seq can support */
2018-05-09 17:52:06 +01:00
if ( WARN_ON ( desc - > max_msg > = MSG_TOKEN_MAX ) ) {
dev_err ( dev , " Maximum message of %d exceeds supported %ld \n " ,
desc - > max_msg , MSG_TOKEN_MAX ) ;
2017-03-28 11:36:07 +01:00
return - EINVAL ;
}
info - > xfer_block = devm_kcalloc ( dev , desc - > max_msg ,
sizeof ( * info - > xfer_block ) , GFP_KERNEL ) ;
if ( ! info - > xfer_block )
return - ENOMEM ;
info - > xfer_alloc_table = devm_kcalloc ( dev , BITS_TO_LONGS ( desc - > max_msg ) ,
sizeof ( long ) , GFP_KERNEL ) ;
if ( ! info - > xfer_alloc_table )
return - ENOMEM ;
/* Pre-initialize the buffer pointer to pre-allocated buffers */
for ( i = 0 , xfer = info - > xfer_block ; i < desc - > max_msg ; i + + , xfer + + ) {
xfer - > rx . buf = devm_kcalloc ( dev , sizeof ( u8 ) , desc - > max_msg_size ,
GFP_KERNEL ) ;
if ( ! xfer - > rx . buf )
return - ENOMEM ;
xfer - > tx . buf = xfer - > rx . buf ;
init_completion ( & xfer - > done ) ;
}
spin_lock_init ( & info - > xfer_lock ) ;
return 0 ;
}
2020-03-27 14:34:26 +00:00
static int scmi_xfer_info_init ( struct scmi_info * sinfo )
{
int ret = __scmi_xfer_info_init ( sinfo , & sinfo - > tx_minfo ) ;
if ( ! ret & & idr_find ( & sinfo - > rx_idr , SCMI_PROTOCOL_BASE ) )
ret = __scmi_xfer_info_init ( sinfo , & sinfo - > rx_minfo ) ;
return ret ;
}
2020-01-31 10:58:13 +05:30
/**
 * scmi_chan_setup() - Find or create the channel used by @dev for @prot_id
 * @info: SCMI platform instance descriptor owning the per-direction idrs
 * @dev: Device the channel is set up on behalf of
 * @prot_id: Protocol identifier, used as the idr slot key
 * @tx: True for the transmit channel, false for the (optional) receive one
 *
 * A protocol without a dedicated channel of its own shares the BASE
 * protocol's channel; either way the resulting channel is recorded in the
 * per-direction idr under @prot_id.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	/* No dedicated channel for this protocol: fall back to BASE's one */
	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	/* Transport-specific channel initialization (via desc->ops) */
	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	/* Record the (possibly shared) channel under this protocol's slot */
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}
2019-07-08 09:40:46 +01:00
static inline int
2020-01-31 10:58:13 +05:30
scmi_txrx_setup ( struct scmi_info * info , struct device * dev , int prot_id )
2019-07-08 09:40:46 +01:00
{
2020-01-31 10:58:13 +05:30
int ret = scmi_chan_setup ( info , dev , prot_id , true ) ;
2019-07-08 09:40:46 +01:00
if ( ! ret ) /* Rx is optional, hence no error check */
2020-01-31 10:58:13 +05:30
scmi_chan_setup ( info , dev , prot_id , false ) ;
2019-07-08 09:40:46 +01:00
return ret ;
}
2017-06-06 11:39:08 +01:00
static inline void
scmi_create_protocol_device ( struct device_node * np , struct scmi_info * info ,
2018-12-21 18:08:08 +00:00
int prot_id , const char * name )
2017-06-06 11:39:08 +01:00
{
struct scmi_device * sdev ;
2018-12-21 18:08:08 +00:00
sdev = scmi_device_create ( np , info - > dev , prot_id , name ) ;
2017-06-06 11:39:08 +01:00
if ( ! sdev ) {
dev_err ( info - > dev , " failed to create %d protocol device \n " ,
prot_id ) ;
return ;
}
2020-01-31 10:58:13 +05:30
if ( scmi_txrx_setup ( info , & sdev - > dev , prot_id ) ) {
2017-07-31 15:43:27 +01:00
dev_err ( & sdev - > dev , " failed to setup transport \n " ) ;
scmi_device_destroy ( sdev ) ;
2018-04-27 17:06:49 +03:00
return ;
2017-07-31 15:43:27 +01:00
}
2017-06-06 11:39:08 +01:00
/* setup handle now as the transport is ready */
scmi_set_handle ( sdev ) ;
}
2019-11-06 15:17:26 +00:00
/* Maximum number of scmi_devices spawned per protocol (e.g. SENSOR has 2) */
#define MAX_SCMI_DEV_PER_PROTOCOL	2

/* Maps a protocol id to the name(s) of the scmi_device(s) to spawn for it */
struct scmi_prot_devnames {
	int protocol_id;
	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
};

static struct scmi_prot_devnames devnames[] = {
	{ SCMI_PROTOCOL_POWER,   { "genpd" },},
	{ SCMI_PROTOCOL_SYSTEM,  { "syspower" },},
	{ SCMI_PROTOCOL_PERF,    { "cpufreq" },},
	{ SCMI_PROTOCOL_CLOCK,   { "clocks" },},
	{ SCMI_PROTOCOL_SENSOR,  { "hwmon", "iiodev" },},
	{ SCMI_PROTOCOL_RESET,   { "reset" },},
	{ SCMI_PROTOCOL_VOLTAGE, { "regulator" },},
};
/* Spawn every scmi_device registered in devnames[] for protocol @prot_id. */
static inline void
scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
			     int prot_id)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(devnames); i++) {
		if (devnames[i].protocol_id != prot_id)
			continue;

		for (j = 0; j < ARRAY_SIZE(devnames[i].names); j++) {
			const char *name = devnames[i].names[j];

			/* Unused name slots are NULL */
			if (name)
				scmi_create_protocol_device(np, info, prot_id,
							    name);
		}
	}
}
2017-03-28 11:36:07 +01:00
/*
 * scmi_probe() - Bring up one SCMI platform instance.
 *
 * Sets up bookkeeping and the BASE protocol channels, pre-allocates the
 * transfer pools, initializes notifications, acquires the (mandatory) BASE
 * protocol and finally spawns one scmi_device per implemented protocol
 * child node found in the devicetree.
 */
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Transport descriptor comes from the matched compatible */
	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);
	idr_init(&info->protocols);
	mutex_init(&info->protocols_mtx);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;
	handle->devm_protocol_get = scmi_devm_protocol_get;
	handle->devm_protocol_put = scmi_devm_protocol_put;

	/* Channels must exist before the xfer pools can be sized/used */
	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	/* Notifications are best-effort: probe continues without them */
	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	/*
	 * Trigger SCMI Base protocol initialization.
	 * It's mandatory and won't be ever released/deinit until the
	 * SCMI stack is shutdown/unloaded as a whole.
	 */
	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI\n");
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	/* One DT child node per protocol; "reg" holds the protocol id */
	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		/* Only spawn devices for protocols the platform implements */
		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}
2020-01-31 10:58:13 +05:30
/*
 * Unlink a channel from the given idr; the channel memory itself is
 * device-managed and does not need explicit freeing here.
 */
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}
/*
 * scmi_remove() - Tear down an SCMI platform instance.
 *
 * Refuses (-EBUSY) while scmi_handle_get() users remain; otherwise unlinks
 * the instance, shuts down notifications and releases the protocol and
 * channel idrs.
 */
static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	scmi_notification_exit(&info->handle);

	mutex_lock(&info->protocols_mtx);
	idr_destroy(&info->protocols);
	mutex_unlock(&info->protocols_mtx);

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	/* Same for the (possibly empty) Rx side */
	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}
2019-10-22 10:09:01 +01:00
/* sysfs (ro): SCMI protocol version reported by the platform, "major.minor" */
static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

/* sysfs (ro): platform firmware implementation version, hex */
static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

/* sysfs (ro): platform vendor identifier string */
static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

/* sysfs (ro): platform sub-vendor identifier string */
static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

/* Attributes exposed via the driver's "versions" dev_groups entry */
static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);
2019-07-08 09:40:42 +01:00
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	/* SMC/HVC transport is only usable when SMCCC discovery is built in */
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc },
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);
2017-03-28 11:36:07 +01:00
/* Platform driver for the top-level SCMI instance node */
static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   /* Exposes the "versions" sysfs attribute group above */
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};
2020-09-07 12:00:04 +01:00
/*
 * Register the SCMI bus, the built-in protocol implementations and finally
 * the platform driver itself.
 */
static int __init scmi_driver_init(void)
{
	scmi_bus_init();

	/* BASE protocol is registered first; probe acquires it as mandatory */
	scmi_base_register();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();

	return platform_driver_register(&scmi_driver);
}
/* subsys level so SCMI is up before consumer drivers at device initcalls */
subsys_initcall(scmi_driver_init);
2020-09-07 12:00:04 +01:00
/* Unregister protocols, the SCMI bus and the platform driver on unload. */
static void __exit scmi_driver_exit(void)
{
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();

	scmi_bus_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);
2017-03-28 11:36:07 +01:00
/*
 * Platform aliases take the form "platform:<driver-name>"; the embedded
 * spaces in the previous alias ("platform: arm-scmi") could never match
 * the driver name "arm-scmi", breaking module autoloading by alias.
 */
MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");