// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 ST Microelectronics
 *
 * Author: Lee Jones <lee.jones@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>

#define MBOX_MAX_SIG_LEN	8
#define MBOX_MAX_MSG_LEN	128
#define MBOX_BYTES_PER_LINE	16
#define MBOX_HEXDUMP_LINE_LEN	((MBOX_BYTES_PER_LINE * 4) + 2)
#define MBOX_HEXDUMP_MAX_LEN	(MBOX_HEXDUMP_LINE_LEN * \
				 (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))

static bool mbox_data_ready;

struct mbox_test_device {
	struct device		*dev;
	void __iomem		*tx_mmio;
	void __iomem		*rx_mmio;
	struct mbox_chan	*tx_channel;
	struct mbox_chan	*rx_channel;
	char			*rx_buffer;
	char			*signal;
	char			*message;
	spinlock_t		lock;
	struct mutex		mutex;
	wait_queue_head_t	waitq;
	struct fasync_struct	*async_queue;
	struct dentry		*root_debugfs_dir;
};
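
/*
 * Writing to the 'signal' debugfs file stages an out-of-band signal.  When
 * Tx MMIO is present, the signal (not the message) is what gets handed to
 * the mailbox controller; the message itself is copied into the MMIO area
 * by the tx_prepare callback.
 */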
static ssize_t mbox_test_signal_write(struct file *filp,
				      const char __user *userbuf,
				      size_t count, loff_t *ppos)
{
	struct mbox_test_device *tdev = filp->private_data;

	if (!tdev->tx_channel) {
		dev_err(tdev->dev, "Channel cannot do Tx\n");
		return -EINVAL;
	}

	if (count > MBOX_MAX_SIG_LEN) {
		dev_err(tdev->dev,
			"Signal length %zd greater than max allowed %d\n",
			count, MBOX_MAX_SIG_LEN);
		return -EINVAL;
	}

	/* Only allocate memory if we need to */
	if (!tdev->signal) {
		tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
		if (!tdev->signal)
			return -ENOMEM;
	}

	if (copy_from_user(tdev->signal, userbuf, count)) {
		kfree(tdev->signal);
		tdev->signal = NULL;
		return -EFAULT;
	}

	return count;
}

static const struct file_operations mbox_test_signal_ops = {
	.write	= mbox_test_signal_write,
	.open	= simple_open,
	.llseek	= generic_file_llseek,
};

static int mbox_test_message_fasync(int fd, struct file *filp, int on)
{
	struct mbox_test_device *tdev = filp->private_data;

	return fasync_helper(fd, filp, on, &tdev->async_queue);
}
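
/*
 * Writing to the 'message' debugfs file sends the user's buffer over the
 * Tx mailbox channel.  The copy and send are serialised with a mutex so
 * that concurrent writers cannot free each other's buffers.
 */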
static ssize_t mbox_test_message_write(struct file *filp,
				       const char __user *userbuf,
				       size_t count, loff_t *ppos)
{
	struct mbox_test_device *tdev = filp->private_data;
	char *message;
	void *data;
	int ret;

	if (!tdev->tx_channel) {
		dev_err(tdev->dev, "Channel cannot do Tx\n");
		return -EINVAL;
	}

	if (count > MBOX_MAX_MSG_LEN) {
		dev_err(tdev->dev,
			"Message length %zd greater than max allowed %d\n",
			count, MBOX_MAX_MSG_LEN);
		return -EINVAL;
	}

	message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
	if (!message)
		return -ENOMEM;

	mutex_lock(&tdev->mutex);

	tdev->message = message;
	ret = copy_from_user(tdev->message, userbuf, count);
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * A separate signal is only of use if there is
	 * MMIO to subsequently pass the message through
	 */
	if (tdev->tx_mmio && tdev->signal) {
		print_hex_dump_bytes("Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
				     tdev->signal, MBOX_MAX_SIG_LEN);

		data = tdev->signal;
	} else
		data = tdev->message;

	print_hex_dump_bytes("Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
			     tdev->message, MBOX_MAX_MSG_LEN);

	ret = mbox_send_message(tdev->tx_channel, data);
	if (ret < 0)
		dev_err(tdev->dev, "Failed to send message via mailbox\n");

out:
	kfree(tdev->signal);
	kfree(tdev->message);
	tdev->signal = NULL;
	tdev->message = NULL;

	mutex_unlock(&tdev->mutex);

	return ret < 0 ? ret : count;
}

static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
{
	bool data_ready;
	unsigned long flags;

	spin_lock_irqsave(&tdev->lock, flags);
	data_ready = mbox_data_ready;
	spin_unlock_irqrestore(&tdev->lock, flags);

	return data_ready;
}
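
/*
 * Reading the 'message' debugfs file blocks (unless O_NONBLOCK is set)
 * until the Rx callback flags new data, then returns a hex dump of the
 * most recently received message.
 */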
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
				      size_t count, loff_t *ppos)
{
	struct mbox_test_device *tdev = filp->private_data;
	unsigned long flags;
	char *touser, *ptr;
	int l = 0;
	int ret;

	DECLARE_WAITQUEUE(wait, current);

	touser = kzalloc(MBOX_HEXDUMP_MAX_LEN + 1, GFP_KERNEL);
	if (!touser)
		return -ENOMEM;

	if (!tdev->rx_channel) {
		ret = snprintf(touser, 20, "<NO RX CAPABILITY>\n");
		ret = simple_read_from_buffer(userbuf, count, ppos,
					      touser, ret);
		goto kfree_err;
	}

	add_wait_queue(&tdev->waitq, &wait);

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		if (mbox_test_message_data_ready(tdev))
			break;

		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto waitq_err;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto waitq_err;
		}
		schedule();

	} while (1);

	spin_lock_irqsave(&tdev->lock, flags);

	ptr = tdev->rx_buffer;
	while (l < MBOX_HEXDUMP_MAX_LEN) {
		hex_dump_to_buffer(ptr,
				   MBOX_BYTES_PER_LINE,
				   MBOX_BYTES_PER_LINE, 1, touser + l,
				   MBOX_HEXDUMP_LINE_LEN, true);

		ptr += MBOX_BYTES_PER_LINE;
		l += MBOX_HEXDUMP_LINE_LEN;
		*(touser + (l - 1)) = '\n';
	}
	*(touser + l) = '\0';
	memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
	mbox_data_ready = false;

	spin_unlock_irqrestore(&tdev->lock, flags);

	ret = simple_read_from_buffer(userbuf, count, ppos, touser, MBOX_HEXDUMP_MAX_LEN);
waitq_err:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tdev->waitq, &wait);
kfree_err:
	kfree(touser);
	return ret;
}

static __poll_t
mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct mbox_test_device *tdev = filp->private_data;

	poll_wait(filp, &tdev->waitq, wait);

	if (mbox_test_message_data_ready(tdev))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static const struct file_operations mbox_test_message_ops = {
	.write	= mbox_test_message_write,
	.read	= mbox_test_message_read,
	.fasync	= mbox_test_message_fasync,
	.poll	= mbox_test_message_poll,
	.open	= simple_open,
	.llseek	= generic_file_llseek,
};

static int mbox_test_add_debugfs(struct platform_device *pdev,
				 struct mbox_test_device *tdev)
{
	if (!debugfs_initialized())
		return 0;

	tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
	if (!tdev->root_debugfs_dir) {
		dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
		return -EINVAL;
	}

	debugfs_create_file("message", 0600, tdev->root_debugfs_dir,
			    tdev, &mbox_test_message_ops);

	debugfs_create_file("signal", 0200, tdev->root_debugfs_dir,
			    tdev, &mbox_test_signal_ops);

	return 0;
}
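
/*
 * Rx callback: latch the incoming message (from Rx MMIO if present,
 * otherwise from the pointer supplied by the mailbox framework) and wake
 * any blocked readers, pollers and async listeners.
 */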
static void mbox_test_receive_message(struct mbox_client *client, void *message)
{
	struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
	unsigned long flags;

	spin_lock_irqsave(&tdev->lock, flags);
	if (tdev->rx_mmio) {
		memcpy_fromio(tdev->rx_buffer, tdev->rx_mmio, MBOX_MAX_MSG_LEN);
		print_hex_dump_bytes("Client: Received [MMIO]: ", DUMP_PREFIX_ADDRESS,
				     tdev->rx_buffer, MBOX_MAX_MSG_LEN);
	} else if (message) {
		print_hex_dump_bytes("Client: Received [API]: ", DUMP_PREFIX_ADDRESS,
				     message, MBOX_MAX_MSG_LEN);
		memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
	}
	mbox_data_ready = true;
	spin_unlock_irqrestore(&tdev->lock, flags);

	wake_up_interruptible(&tdev->waitq);

	kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);
}
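
/*
 * tx_prepare callback: if Tx MMIO exists, copy the outgoing message into
 * it before the controller transmits; when a separate signal has been
 * staged, the signal is what is passed to the controller as 'data'.
 */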
static void mbox_test_prepare_message(struct mbox_client *client, void *message)
{
	struct mbox_test_device *tdev = dev_get_drvdata(client->dev);

	if (tdev->tx_mmio) {
		if (tdev->signal)
			memcpy_toio(tdev->tx_mmio, tdev->message, MBOX_MAX_MSG_LEN);
		else
			memcpy_toio(tdev->tx_mmio, message, MBOX_MAX_MSG_LEN);
	}
}

static void mbox_test_message_sent(struct mbox_client *client,
				   void *message, int r)
{
	if (r)
		dev_warn(client->dev,
			 "Client: Message could not be sent: %d\n", r);
	else
		dev_info(client->dev,
			 "Client: Message sent\n");
}

static struct mbox_chan *
mbox_test_request_channel(struct platform_device *pdev, const char *name)
{
	struct mbox_client *client;
	struct mbox_chan *channel;

	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->dev		= &pdev->dev;
	client->rx_callback	= mbox_test_receive_message;
	client->tx_prepare	= mbox_test_prepare_message;
	client->tx_done		= mbox_test_message_sent;
	client->tx_block	= true;
	client->knows_txdone	= false;
	client->tx_tout		= 500;

	channel = mbox_request_channel_byname(client, name);
	if (IS_ERR(channel)) {
		dev_warn(&pdev->dev, "Failed to request %s channel\n", name);
		return NULL;
	}

	return channel;
}
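
/*
 * Probe: the first two optional 'reg' entries provide Tx and Rx MMIO
 * areas; either (or both) may be absent, in which case data travels
 * purely through the mailbox framework's message pointer.
 */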
static int mbox_test_probe(struct platform_device *pdev)
{
	struct mbox_test_device *tdev;
	struct resource *res;
	resource_size_t size;
	int ret;

	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	/* It's okay for MMIO to be NULL */
	tdev->tx_mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
		/* if reserved area in SRAM, try just ioremap */
		size = resource_size(res);
		tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
	} else if (IS_ERR(tdev->tx_mmio)) {
		tdev->tx_mmio = NULL;
	}

	/* If specified, second reg entry is Rx MMIO */
	tdev->rx_mmio = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
	if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
		size = resource_size(res);
		tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
	} else if (IS_ERR(tdev->rx_mmio)) {
		tdev->rx_mmio = tdev->tx_mmio;
	}

	tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
	tdev->rx_channel = mbox_test_request_channel(pdev, "rx");

	if (IS_ERR_OR_NULL(tdev->tx_channel) && IS_ERR_OR_NULL(tdev->rx_channel))
		return -EPROBE_DEFER;

	/* If Rx is not specified but has Rx MMIO, then Rx = Tx */
	if (!tdev->rx_channel && (tdev->rx_mmio != tdev->tx_mmio))
		tdev->rx_channel = tdev->tx_channel;

	tdev->dev = &pdev->dev;
	platform_set_drvdata(pdev, tdev);

	spin_lock_init(&tdev->lock);
	mutex_init(&tdev->mutex);

	if (tdev->rx_channel) {
		tdev->rx_buffer = devm_kzalloc(&pdev->dev,
					       MBOX_MAX_MSG_LEN, GFP_KERNEL);
		if (!tdev->rx_buffer)
			return -ENOMEM;
	}

	ret = mbox_test_add_debugfs(pdev, tdev);
	if (ret)
		return ret;

	init_waitqueue_head(&tdev->waitq);

	dev_info(&pdev->dev, "Successfully registered\n");

	return 0;
}

static void mbox_test_remove(struct platform_device *pdev)
{
	struct mbox_test_device *tdev = platform_get_drvdata(pdev);

	debugfs_remove_recursive(tdev->root_debugfs_dir);

	if (tdev->tx_channel)
		mbox_free_channel(tdev->tx_channel);
	if (tdev->rx_channel)
		mbox_free_channel(tdev->rx_channel);
}

static const struct of_device_id mbox_test_match[] = {
	{ .compatible = "mailbox-test" },
	{},
};
MODULE_DEVICE_TABLE(of, mbox_test_match);

static struct platform_driver mbox_test_driver = {
	.driver = {
		.name = "mailbox_test",
		.of_match_table = mbox_test_match,
	},
	.probe	= mbox_test_probe,
	.remove_new = mbox_test_remove,
};
module_platform_driver(mbox_test_driver);

MODULE_DESCRIPTION("Generic Mailbox Testing Facility");
MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
MODULE_LICENSE("GPL v2");