// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Mailbox Transport
 * driver.
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include "common.h"

/**
 * struct scmi_mailbox - Structure representing a SCMI mailbox transport
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 */
struct scmi_mailbox {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
};

#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)

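/*
 * Invoked by the mailbox framework right before the doorbell is rung:
 * copy the outgoing SCMI message into the shared memory area so the
 * platform finds it there once signalled.
 */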
static void tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	shmem_tx_prepare(smbox->shmem, m);
}

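/*
 * Invoked by the mailbox framework when the platform signals incoming
 * data: hand the received message header to the SCMI core, which then
 * pulls the payload through the fetch_* operations below.
 */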
static void rx_callback(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}

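/* A channel is usable only if the device node carries an "mboxes" entry. */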
static bool mailbox_chan_available(struct device *dev, int idx)
{
	return !of_parse_phandle_with_args(dev->of_node, "mboxes",
					   "#mbox-cells", idx, NULL);
}

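/*
 * Set up one transport channel: map the shared memory area referenced by
 * the "shmem" devicetree property (index 0 for Tx, 1 for Rx) and request
 * the matching mailbox channel on behalf of this client.
 */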
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			      bool tx)
{
	const char *desc = tx ? "Tx" : "Rx";
	struct device *cdev = cinfo->dev;
	struct scmi_mailbox *smbox;
	struct device_node *shmem;
	int ret, idx = tx ? 0 : 1;
	struct mbox_client *cl;
	resource_size_t size;
	struct resource res;

	smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
	if (!smbox)
		return -ENOMEM;

	shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
	if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
		of_node_put(shmem);
		return -ENXIO;
	}

	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
		return ret;
	}

	size = resource_size(&res);
	smbox->shmem = devm_ioremap(dev, res.start, size);
	if (!smbox->shmem) {
		dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
		return -EADDRNOTAVAIL;
	}

	cl = &smbox->cl;
	cl->dev = cdev;
	cl->tx_prepare = tx ? tx_prepare : NULL;
	cl->rx_callback = rx_callback;
	cl->tx_block = false;
	cl->knows_txdone = tx;

	smbox->chan = mbox_request_channel(cl, tx ? 0 : 1);
	if (IS_ERR(smbox->chan)) {
		ret = PTR_ERR(smbox->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(cdev, "failed to request SCMI %s mailbox\n",
				desc);
		return ret;
	}

	cinfo->transport_info = smbox;
	smbox->cinfo = cinfo;

	return 0;
}

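/* Release the mailbox channel, if any, and detach it from the SCMI core. */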
static int mailbox_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_mailbox *smbox = cinfo->transport_info;

	if (smbox && !IS_ERR(smbox->chan)) {
		mbox_free_channel(smbox->chan);
		cinfo->transport_info = NULL;
		smbox->chan = NULL;
		smbox->cinfo = NULL;
	}

	scmi_free_channel(cinfo, data, id);

	return 0;
}

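/*
 * Queue the xfer on the mailbox channel; the framework calls tx_prepare()
 * above to populate the shared memory area before ringing the doorbell.
 */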
static int mailbox_send_message(struct scmi_chan_info *cinfo,
				struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;
	int ret;

	ret = mbox_send_message(smbox->chan, xfer);

	/* mbox_send_message returns non-negative value on success, so reset */
	if (ret > 0)
		ret = 0;

	return ret;
}

static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
				struct scmi_xfer *__unused)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(smbox->chan, ret);
}

static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
				   struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	shmem_fetch_response(smbox->shmem, xfer);
}

static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
				       size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	shmem_fetch_notification(smbox->shmem, max_len, xfer);
}

static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	shmem_clear_channel(smbox->shmem);
}

static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	return shmem_poll_done(smbox->shmem, xfer);
}

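/* Transport operations exposed to the SCMI core by this mailbox transport. */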
static const struct scmi_transport_ops scmi_mailbox_ops = {
	.chan_available = mailbox_chan_available,
	.chan_setup = mailbox_chan_setup,
	.chan_free = mailbox_chan_free,
	.send_message = mailbox_send_message,
	.mark_txdone = mailbox_mark_txdone,
	.fetch_response = mailbox_fetch_response,
	.fetch_notification = mailbox_fetch_notification,
	.clear_channel = mailbox_clear_channel,
	.poll_done = mailbox_poll_done,
};

const struct scmi_desc scmi_mailbox_desc = {
	.ops = &scmi_mailbox_ops,
	.max_rx_timeout_ms = 30, /* We may increase this if required */
	.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};