// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)

struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
	};
	u8 subsys;
	u8 op;
};
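
/*
 * Note: struct cmdq_instruction above encodes one 8-byte GCE command word.
 * In declaration order the fields are value/mask (32 bits), offset/event
 * (16 bits), subsys (8 bits) and op (8 bits); CMDQ_INST_SIZE from
 * mtk-cmdq.h is assumed to equal sizeof(struct cmdq_instruction).
 */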

int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
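
/*
 * A minimal usage sketch, not taken from a real driver: the index 0 and
 * the variable names are illustrative. In the device tree, each entry of
 * "mediatek,gce-client-reg" carries a GCE phandle plus three cells, which
 * land in the subsys, offset and size fields filled in above:
 *
 *	struct cmdq_client_reg creg;
 *	int ret = cmdq_dev_get_client_reg(dev, &creg, 0);
 *
 *	if (ret)
 *		return ret;
 *	// creg.subsys and creg.offset now describe the client's registers
 */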

static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used
		 * up, the real required size (pkt->cmd_buf_size) is still
		 * increased, so that the user knows how much memory should
		 * be ultimately allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
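
/*
 * Note: when mask != 0xffffffff, a masked write above costs two GCE
 * instructions (a MASK instruction carrying ~mask, then a WRITE whose
 * offset has CMDQ_WRITE_ENABLE_MASK set), so callers sizing a packet
 * should budget 2 * CMDQ_INST_SIZE for it. cmdq_pkt_poll_mask() below
 * follows the same two-instruction pattern.
 */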

int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

MODULE_LICENSE("GPL v2");