// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2022 ARM Ltd.
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "common.h"

#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
#define VIRTIO_SCMI_MAX_MSG_SIZE	128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG		2

/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	/* lock to protect access to the free list. */
	spinlock_t free_lock;
	struct list_head free_list;
	/* lock to protect access to the pending list. */
	spinlock_t pending_lock;
	struct list_head pending_cmds_list;
	struct work_struct deferred_tx_work;
	struct workqueue_struct *deferred_tx_wq;
	bool is_rx;
	unsigned int max_msg;
	/*
	 * Lock to protect access to all members except users, free_list and
	 * pending_cmds_list
	 */
	spinlock_t lock;
	struct completion *shutdown_done;
	refcount_t users;
};
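
/*
 * Polling state of a message reply: a message starts out as NOT_POLLED; a
 * poll-mode transmission marks it POLLING, then POLL_DONE once its reply is
 * dequeued, or POLL_TIMEOUT if the polling transaction times out first.
 */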
enum poll_states {
	VIO_MSG_NOT_POLLED,
	VIO_MSG_POLL_TIMEOUT,
	VIO_MSG_POLLING,
	VIO_MSG_POLL_DONE,
};

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message users and avoid premature
 *	   freeing (and reuse) when polling and IRQ execution paths interleave.
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
	unsigned int poll_idx;
	enum poll_states poll_status;
	/* Lock to protect access to poll_status */
	spinlock_t poll_lock;
	refcount_t users;
};

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly setting channel not available any more */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	return refcount_inc_not_zero(&vioch->users);
}
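
/* The last user to drop its reference completes any requested shutdown */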
static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
	if (refcount_dec_and_test(&vioch->users)) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
}

static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);

	/*
	 * Prepare to wait for the last release if not already released
	 * or in progress.
	 */
	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	if (!vioch->is_rx && vioch->deferred_tx_wq)
		/* Cannot be kicked anymore after this...*/
		vioch->deferred_tx_wq = NULL;
	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	/* Let any possibly concurrent RX path release the channel */
	wait_for_completion(vioch->shutdown_done);
}

/* Assumes to be called with vio channel acquired already */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Still no users, no need to acquire poll_lock */
	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);

	return msg;
}

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
	return refcount_inc_not_zero(&msg->users);
}
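
/* Dropping the last reference returns the message to the channel free_list */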
/* Assumes to be called with vio channel acquired already */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}
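
/* Queue a message input buffer on the Rx virtqueue for the device to fill */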
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;
	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

/*
 * Assume to be called with channel already acquired or not ready at all;
 * vioch->lock MUST NOT have been already acquired.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
}
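
/*
 * Shared virtqueue callback: drain all used buffers, dispatch each one to the
 * SCMI core and then refill the Rx virtqueue or release the Tx message.
 */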
static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
	bool cb_enabled = true;

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	for (;;) {
		if (!scmi_vio_channel_acquire(vioch))
			return;

		spin_lock_irqsave(&vioch->lock, flags);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
		}

		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			if (virtqueue_enable_cb(vqueue)) {
				spin_unlock_irqrestore(&vioch->lock, flags);
				scmi_vio_channel_release(vioch);
				return;
			}
			cb_enabled = true;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);

		if (msg) {
			msg->rx_len = length;
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release vio channel between loop iterations to allow
		 * virtio_chan_free() to eventually fully release it when
		 * shutting down; in such a case, any outstanding message will
		 * be ignored since this loop will bail out at the next
		 * iteration.
		 */
		scmi_vio_channel_release(vioch);
	}
}

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg, *tmp;

	vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

	if (!scmi_vio_channel_acquire(vioch))
		return;

	/*
	 * Process pre-fetched messages: these could be non-polled messages or
	 * late timed-out replies to polled messages dequeued by chance while
	 * polling for some other messages: this worker is in charge to process
	 * the valid non-expired messages and anyway finally free all of them.
	 */
	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Scan the list of possibly pre-fetched messages during polling. */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);

		/*
		 * Channel is acquired here (cannot vanish) and this message
		 * is no more processed elsewhere so no poll_lock needed.
		 */
		if (msg->poll_status == VIO_MSG_NOT_POLLED)
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

		/* Free the processed message once done */
		scmi_vio_msg_release(vioch, msg);
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);

	/* Process possibly still pending messages */
	scmi_vio_complete_cb(vioch->vqueue);

	scmi_vio_channel_release(vioch);
}

static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };

static vq_callback_t *scmi_vio_complete_callbacks[] = {
	scmi_vio_complete_cb,
	scmi_vio_complete_cb
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

static int virtio_link_supplier(struct device *dev)
{
	if (!scmi_vdev) {
		dev_notice(dev,
			   "Deferring probe after not finding a bound scmi-virtio device\n");
		return -EPROBE_DEFER;
	}

	if (!device_link_add(dev, &scmi_vdev->dev,
			     DL_FLAG_AUTOREMOVE_CONSUMER)) {
		dev_err(dev, "Adding link to supplier virtio device failed\n");
		return -ECANCELED;
	}

	return 0;
}

static bool virtio_chan_available(struct device_node *of_node, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	if (WARN_ON_ONCE(!scmi_vdev))
		return false;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		if (scmi_vio_have_vq_rx(scmi_vdev))
			vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	return vioch && !vioch->cinfo;
}
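
/* devm action: tear down the deferred Tx workqueue when its owner device goes away */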
static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
{
	destroy_workqueue(deferred_tx_wq);
}
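
/*
 * Allocate the per-channel pool of message buffers, prime the Rx virtqueue or
 * the Tx free_list with them and finally mark the channel as ready.
 */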
static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	struct scmi_vio_channel *vioch;
	int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	int i;

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* Setup a deferred worker for polling. */
	if (tx && !vioch->deferred_tx_wq) {
		int ret;

		vioch->deferred_tx_wq =
			alloc_workqueue(dev_name(&scmi_vdev->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;

		ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
					       vioch->deferred_tx_wq);
		if (ret)
			return ret;

		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		if (tx) {
			msg->request = devm_kzalloc(dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
			spin_lock_init(&msg->poll_lock);
			refcount_set(&msg->users, 1);
		}

		msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;

		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_ready(vioch, cinfo);

	return 0;
}

static int virtio_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	/*
	 * Break device to inhibit further traffic flowing while shutting down
	 * the channels: doing it later holding vioch->lock creates unsafe
	 * locking dependency chains as reported by LOCKDEP.
	 */
	virtio_break_device(vioch->vqueue->vdev);
	scmi_vio_channel_cleanup_sync(vioch);

	return 0;
}
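
/*
 * Each Tx message is queued as a pair of descriptors: a device-readable
 * request buffer and a device-writable response buffer
 * (see DESCRIPTORS_PER_TX_MSG).
 */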
static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out;
	struct scatterlist sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	msg_tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	/*
	 * If polling was requested for this transaction:
	 *  - retrieve last used index (will be used as polling reference)
	 *  - bind the polled message to the xfer via .priv
	 *  - grab an additional msg refcount for the poll-path
	 */
	if (xfer->hdr.poll_completion) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		/* Still no users, no need to acquire poll_lock */
		msg->poll_status = VIO_MSG_POLLING;
		scmi_vio_msg_acquire(msg);
		/* Ensure initialized msg is visibly bound to xfer */
		smp_store_mb(xfer->priv, msg);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* Ensure order between xfer->priv clear and vq feeding */
		smp_store_mb(xfer->priv, NULL);
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		scmi_vio_msg_release(vioch, msg);
	}

	scmi_vio_channel_release(vioch);

	return rc;
}

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_response(msg->input, msg->rx_len, xfer);
}

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
				      size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
}

/**
 * virtio_mark_txdone - Mark transmission done
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages once, finally, a late
 * reply is received and discarded (if ever).
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * timeout expiration: forcibly freeing and reusing them before they had been
 * returned explicitly by the SCMI platform could lead to subtle bugs due to
 * message corruption.
 * An SCMI platform VirtIO device which never returns message buffers is
 * anyway broken and it will quickly lead to exhaustion of available messages.
 *
 * For this same reason, here, we take care to free only the polled messages
 * that had been somehow replied (only if not by chance already processed on
 * the IRQ path - the initial scmi_vio_msg_release() takes care of this) and
 * also any timed-out polled message if that indeed appears to have been at
 * least dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since
 * such messages won't be freed elsewhere. Any other polled message is marked
 * as VIO_MSG_POLL_TIMEOUT.
 *
 * Possible late replies to timed-out polled messages will be eventually freed
 * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
 * dequeued on some other polling path.
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/* Ensure msg is unbound from xfer anyway at this point */
	smp_store_mb(xfer->priv, NULL);

	/* Must be a polled xfer and not already freed on the IRQ path */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free a timed-out polled message while it is still inflight */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);

	scmi_vio_channel_release(vioch);
}

/**
 * virtio_poll_done - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence we may end up dequeuing something different from the buffer
 * we were poll-waiting for: if that is the case such early fetched buffers are
 * then added to the @pending_cmds_list for later processing by a dedicated
 * deferred worker.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we find the one we were polling on, or
 * we have 'seemingly' emptied the virtqueue; if some buffers are still pending
 * in the vqueue at the end of the polling loop (possible due to inherent races
 * in virtqueues handling mechanisms), we similarly kick the deferred worker
 * and let it process those, to avoid indefinitely looping in the .poll_done
 * busy-waiting helper.
 *
 * Finally, we delegate to the deferred worker also the final free of any timed
 * out reply to a polled message that we should dequeue.
 *
 * Note that, since we do NOT have a per-message suppress notification
 * mechanism, the message we are polling for could be alternatively delivered
 * via usual IRQ callbacks on another core which happened to have IRQs enabled
 * while we are actively polling for it here: in such a case it will be handled
 * as such by scmi_rx_callback() and the polling loop in the SCMI Core TX path
 * will be transparently terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
	bool pending, found = false;
	unsigned int length, any_prefetched = 0;
	unsigned long flags;
	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!msg)
		return true;

	/*
	 * Processed already by another polling loop on another CPU?
	 *
	 * Note that this message is acquired on the poll path so cannot vanish
	 * while inside this loop iteration even if concurrently processed on
	 * the IRQ path.
	 *
	 * Avoid acquiring poll_lock since poll_status can be changed in a
	 * relevant manner only later in this same thread of execution: any
	 * other possible changes made concurrently by other polling loops or
	 * by a reply delivered on the IRQ path have no meaningful impact on
	 * this loop iteration: in other words it is harmless to allow this
	 * possible race but let us avoid spinlocking with irqs off in this
	 * initial part of the polling loop.
	 */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;

	if (!scmi_vio_channel_acquire(vioch))
		return true;

	/* Has the cmdq index moved at all? */
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	if (!pending) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/*
	 * Process all new messages till the polled-for message is found OR
	 * the vqueue is empty.
	 */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		bool next_msg_done = false;

		/*
		 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
		 * that it can be properly freed even on timeout in mark_txdone.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			next_msg_done = true;
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is this the message we were polling for? */
		if (next_msg == msg) {
			found = true;
			break;
		} else if (next_msg_done) {
			/* Skip the rest if this was another polled msg */
			continue;
		}

		/*
		 * Enqueue for later processing any non-polled message and any
		 * timed-out polled one that we happen to have dequeued.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}

	/*
	 * Once the polling loop has successfully terminated, anything that was
	 * queued in the meantime will be served by the deferred worker OR by
	 * the normal IRQ/callback OR by other poll loops.
	 *
	 * If we are still looking for the polled reply, the polling index has
	 * to be updated to the current vqueue last used index.
	 */
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return found;
}

static const struct scmi_transport_ops scmi_virtio_ops = {
	.link_supplier = virtio_link_supplier,
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
	.mark_txdone = virtio_mark_txdone,
	.poll_done = virtio_poll_done,
};
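
/*
 * Probe discovers the cmdq/eventq virtqueues, sizes each channel message pool
 * from its vring size and finally publishes the bound device via scmi_vdev.
 */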
static int scmi_vio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct scmi_vio_channel *channels;
	bool have_vq_rx;
	int vq_cnt;
	int i;
	int ret;
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtIO device allowed */
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

	channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	if (have_vq_rx)
		channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

	ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
			      scmi_vio_vqueue_names, NULL);
	if (ret) {
		dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
		return ret;
	}

	for (i = 0; i < vq_cnt; i++) {
		unsigned int sz;

		spin_lock_init(&channels[i].lock);
		spin_lock_init(&channels[i].free_lock);
		INIT_LIST_HEAD(&channels[i].free_list);
		spin_lock_init(&channels[i].pending_lock);
		INIT_LIST_HEAD(&channels[i].pending_cmds_list);
		channels[i].vqueue = vqs[i];

		sz = virtqueue_get_vring_size(channels[i].vqueue);
		/* Tx messages need multiple descriptors. */
		if (!channels[i].is_rx)
			sz /= DESCRIPTORS_PER_TX_MSG;

		if (sz > MSG_TOKEN_MAX) {
			dev_info(dev,
				 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
				 channels[i].is_rx ? "rx" : "tx",
				 sz, MSG_TOKEN_MAX);
			sz = MSG_TOKEN_MAX;
		}
		channels[i].max_msg = sz;
	}

	vdev->priv = channels;

	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	return 0;
}

static void scmi_vio_remove(struct virtio_device *vdev)
{
	/*
	 * Once we get here, virtio_chan_free() will have already been called by
	 * the SCMI core for any existing channel and, as a consequence, all the
	 * virtio channels will have been already marked NOT ready, causing any
	 * outstanding message on any vqueue to be ignored by complete_cb: now
	 * we can just stop processing buffers and destroy the vqueues.
	 */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}
#endif
	return 0;
}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

static int __init virtio_scmi_init(void)
{
	return register_virtio_driver(&virtio_scmi_driver);
}

static void virtio_scmi_exit(void)
{
	unregister_virtio_driver(&virtio_scmi_driver);
}

const struct scmi_desc scmi_virtio_desc = {
	.transport_init = virtio_scmi_init,
	.transport_exit = virtio_scmi_exit,
	.ops = &scmi_virtio_ops,
	/* for non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};