// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitfield.h>	/* FIELD_GET()/FIELD_PREP() used below */
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"

#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND	0
#define MSG_TYPE_DELAYED_RESP	2
#define MSG_TYPE_NOTIFICATION	3
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)

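/*
 * Worked example of the header layout above (illustrative values only):
 * a command with message id 0x7, protocol id 0x13 and sequence token 1
 * packs as
 *
 *	FIELD_PREP(MSG_ID_MASK, 0x7) |
 *	FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x13) |
 *	FIELD_PREP(MSG_TOKEN_ID_MASK, 1) == 0x00044c07
 *
 * and MSG_XTRACT_TOKEN(0x00044c07) recovers the token 1, which is how a
 * response is matched back to its pending transfer.
 */
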
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10, /* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};

/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @payload: Transmit/Receive mailbox channel payload area
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *payload;
	struct device *dev;
	struct scmi_handle *handle;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[0];
};

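/*
 * Note on the endianness rule above: ioread32()/iowrite32() already perform
 * the little-endian conversion on these MMIO fields, so e.g.
 *
 *	u32 len = ioread32(&mem->length);
 *
 * yields the correct value on big-endian kernels too, with no explicit
 * le32_to_cpu() needed when accessing the shared memory area this way.
 */
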
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
		return scmi_linux_errmap[-errno];
	return -EIO;
}

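/*
 * For example, a platform status of SCMI_ERR_BUSY (-6) maps to -EBUSY,
 * while any value outside the (SCMI_ERR_MAX, SCMI_SUCCESS) open interval
 * falls through to the generic -EIO.
 */
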
/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

static void scmi_fetch_response(struct scmi_xfer *xfer,
				struct scmi_shared_mem __iomem *mem)
{
	xfer->hdr.status = ioread32(mem->msg_payload);
	/* Skip the length of header and status in payload area, i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id and sequence id.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}

/**
 * unpack_scmi_header() - unpacks and records message and protocol id
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header to fetch message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
	hdr->id = MSG_XTRACT_ID(msg_hdr);
	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
}

/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * This function prepares the shared memory which contains the header and the
 * payload.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/*
	 * Ideally the channel must be free by now. But if the OS timed out
	 * waiting on an earlier request while the platform continued to
	 * process it, wait until the platform releases the shared memory;
	 * otherwise we may end up overwriting its response with a new
	 * message payload or vice versa.
	 */
	spin_until_cond(ioread32(&mem->channel_status) &
			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
	if (t->tx.buf)
		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to the allocated xfer if all went fine, else an ERR_PTR
 *	encoding the corresponding error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u8 msg_type;
	u32 msg_hdr;
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	msg_hdr = ioread32(&mem->msg_header);
	msg_type = MSG_XTRACT_TYPE(msg_hdr);
	xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	if (msg_type == MSG_TYPE_NOTIFICATION)
		return; /* Notifications not yet supported */

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	scmi_dump_header_dbg(dev, &xfer->hdr);

	scmi_fetch_response(xfer, mem);

	if (msg_type == MSG_TYPE_DELAYED_RESP)
		complete(xfer->async_done);
	else
		complete(&xfer->done);
}

/**
 * scmi_xfer_put() - Release a transmit message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_shared_mem __iomem *mem = cinfo->payload;
	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&mem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	ktime_t __cur = ktime_get();

	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, the corresponding error in
 *	case of a transmit error, else 0 if all goes well.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	ret = mbox_send_message(cinfo->chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			scmi_fetch_response(xfer, cinfo->payload);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(cinfo->chan, ret);

	return ret;
}

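/*
 * Illustrative use of the polling path above (hypothetical caller): a
 * context that cannot sleep sets xfer->hdr.poll_completion = true before
 * calling scmi_do_xfer(), trading the completion wait for a busy wait
 * bounded by SCMI_MAX_POLL_TO_NS.
 */
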
#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no delayed response, the corresponding
 *	error in case of a transmit error, else 0 if all goes well.
 */
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
			       struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = scmi_do_xfer(handle, xfer);
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;

	xfer->async_done = NULL;

	return ret;
}

/**
 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

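/*
 * The canonical calling sequence for a synchronous command, of which
 * scmi_version_get() below is a concrete instance:
 *
 *	ret = scmi_xfer_get_init(handle, msg_id, prot_id, tx_size, rx_size, &t);
 *	if (!ret) {
 *		ret = scmi_do_xfer(handle, t);
 *		scmi_xfer_put(handle, t);
 *	}
 */
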
/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *	-EINVAL if NULL was passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

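/*
 * Balanced usage from a client driver (sketch; assumes an SCMI device
 * whose parent is the SCMI instance device):
 *
 *	handle = scmi_handle_get(&sdev->dev);
 *	if (handle) {
 *		... use the handle ...
 *		scmi_handle_put(handle);
 *	}
 */
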
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;
	struct scmi_xfers_info *info = &sinfo->tx_minfo;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

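/*
 * Arithmetic behind the WARN_ON above: the sequence token occupies bits
 * 27-18 of the header, i.e. 10 bits, so MSG_TOKEN_MAX is 1024; the generic
 * descriptor at the bottom of this file only requests 20 pre-allocated
 * messages, well within that limit.
 */
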
static int scmi_mailbox_check(struct device_node *np, int idx)
{
	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
					  idx, NULL);
}

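/*
 * Matching devicetree fragment (illustrative; the mailbox and shmem node
 * names are hypothetical):
 *
 *	scmi {
 *		compatible = "arm,scmi";
 *		mboxes = <&mhu 0>, <&mhu 1>;
 *		shmem = <&cpu_scp_lpri>, <&cpu_scp_hpri>;
 *	};
 *
 * Index 0 is the Tx channel, index 1 the optional Rx channel; the checks
 * below simply probe whether a phandle exists at the given index.
 */
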
static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
				int prot_id, bool tx)
{
	int ret, idx;
	struct resource res;
	resource_size_t size;
	struct device_node *shmem, *np = dev->of_node;
	struct scmi_chan_info *cinfo;
	struct mbox_client *cl;
	struct idr *idr;
	const char *desc = tx ? "Tx" : "Rx";

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (scmi_mailbox_check(np, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	cl = &cinfo->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
	cl->tx_block = false;
	cl->knows_txdone = tx;

	shmem = of_parse_phandle(np, "shmem", idx);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
		return ret;
	}

	size = resource_size(&res);
	cinfo->payload = devm_ioremap(info->dev, res.start, size);
	if (!cinfo->payload) {
		dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
		return -EADDRNOTAVAIL;
	}

	cinfo->chan = mbox_request_channel(cl, idx);
	if (IS_ERR(cinfo->chan)) {
		ret = PTR_ERR(cinfo->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI %s mailbox\n",
				desc);
		return ret;
	}

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_mbox_chan_setup(info, dev, prot_id, false);

	return ret;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

#define MAX_SCMI_DEV_PER_PROTOCOL	2
struct scmi_prot_devnames {
	int protocol_id;
	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
};

static struct scmi_prot_devnames devnames[] = {
	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
	{ SCMI_PROTOCOL_RESET,  { "reset" },},
};

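/*
 * For example, a devicetree node advertising SCMI_PROTOCOL_PERF results in
 * one child device carrying the name "cpufreq"; the scmi_driver registering
 * a matching { protocol id, name } pair in its id table then binds to it.
 */
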
static inline void
scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
			     int prot_id)
{
	int loop, cnt;

	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
		if (devnames[loop].protocol_id != prot_id)
			continue;

		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
			const char *name = devnames[loop].names[cnt];

			if (name)
				scmi_create_protocol_device(np, info, prot_id,
							    name);
		}
	}
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np, 0)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id)) {
			dev_err(dev, "Out of range protocol %d\n", prot_id);
			continue;
		}

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}

static int scmi_mbox_free_channel(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct idr *idr = data;

	if (!IS_ERR_OR_NULL(cinfo->chan)) {
		mbox_free_channel(cinfo->chan);
		cinfo->chan = NULL;
	}

	idr_remove(idr, id);

	return 0;
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* We may increase this if required */
	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		.name = "arm-scmi",
		.of_match_table = scmi_of_match,
	},
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");