// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	1
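
/*
 * In-memory layout of a single GCE instruction (CMDQ_INST_SIZE bytes).
 * The unions mirror the argument fields used by the different command
 * codes emitted below.
 */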
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};
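
/*
 * Parse entry @idx of the "mediatek,gce-client-reg" phandle list in @dev's
 * device tree node and fill @client_reg with the subsys, offset and size
 * cells of that entry.
 */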
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
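
/*
 * Allocate a CMDQ client and request GCE mailbox channel @index for it.
 * The channel is used in non-blocking mode with client-driven txdone
 * reporting (see cmdq_pkt_flush_async()).
 */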
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return (struct cmdq_client *)-ENOMEM;

	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);
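
/* Release the mailbox channel and free the client created by cmdq_mbox_create(). */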
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
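
/*
 * Allocate a command packet with a @size byte command buffer and map the
 * buffer for DMA towards the GCE. Returns an ERR_PTR() on failure.
 */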
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);
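
/* Unmap the DMA buffer, then free the command buffer and the packet. */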
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
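
/*
 * Copy one instruction into the packet's command buffer and advance
 * pkt->cmd_buf_size. Returns -ENOMEM once the buffer is full.
 */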
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) is still
		 * increased, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
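
/*
 * A minimal sketch of the retry protocol described above. Here @cl is a
 * client created with cmdq_mbox_create() and build_commands() is a
 * hypothetical helper that appends every command the caller needs,
 * ignoring individual -ENOMEM results, and returns the last error:
 *
 *	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	if (!IS_ERR(pkt) && build_commands(pkt) == -ENOMEM) {
 *		size_t needed = pkt->cmd_buf_size;	// real required size
 *
 *		cmdq_pkt_destroy(pkt);
 *		pkt = cmdq_pkt_create(cl, needed);	// retry with a larger buffer
 *	}
 */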
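
/* Append a WRITE command: write @value to register @offset of subsystem @subsys. */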
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
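
/*
 * Masked variant of cmdq_pkt_write(). If @mask is not 0xffffffff, a MASK
 * command carrying ~@mask is emitted first and CMDQ_WRITE_ENABLE_MASK is
 * set in the offset of the following write.
 */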
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
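
/*
 * Append a READ_S command: read from the address formed by GCE internal
 * register @high_addr_reg_idx (high bits) plus @addr_low into internal
 * register @reg_idx.
 */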
int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);
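
/*
 * Append a WRITE_S command: write the value held in GCE internal register
 * @src_reg_idx to the address formed by internal register
 * @high_addr_reg_idx (high bits) plus @addr_low.
 */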
int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);
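
/* As cmdq_pkt_write_s(), but preceded by a MASK command carrying ~@mask. */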
int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);
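
/* As cmdq_pkt_write_s(), but writing the immediate @value instead of a register. */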
int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);
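
/* As cmdq_pkt_write_s_value(), but preceded by a MASK command carrying ~@mask. */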
int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);
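
/*
 * Append a WFE command: wait for hardware event @event. If @clear is true,
 * the event is also cleared once it has arrived.
 */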
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
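
/* Append a command that clears hardware event @event. */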
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
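
/* Append a command that sets hardware event @event. */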
int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);
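
/*
 * Append a POLL command: have the GCE poll register @offset of subsystem
 * @subsys until it reads back @value.
 */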
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);
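
/*
 * As cmdq_pkt_poll(), but only the bits covered by @mask are compared; a
 * MASK command is emitted first and CMDQ_POLL_ENABLE_MASK is set in the
 * offset of the poll command.
 */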
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
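
/* Append a LOGIC command that assigns the immediate @value to GCE internal register @reg_idx. */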
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);
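
/*
 * Append a JUMP command whose target is @addr, shifted according to the
 * GCE physical-address shift of this mailbox channel.
 */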
int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_RELATIVE;
	inst.value = addr >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump);
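
/*
 * Terminate the packet: append an EOC command that raises an IRQ, followed
 * by a trailing JUMP.
 */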
int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);
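
/*
 * Completion callback invoked by the mailbox framework: sync the command
 * buffer back for the CPU, then forward the result to the client's own
 * callback, if one was registered.
 */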
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}
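
/*
 * Queue the packet on the GCE mailbox channel. The flush is asynchronous:
 * the buffer is synced for the device, handed to the mailbox, and txdone
 * is reported immediately so the next packet can be sent; @cb (if any) is
 * called with @data once the controller reports completion.
 */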
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
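
/*
 * Typical call sequence for a client driver, as a minimal sketch: error
 * handling is trimmed, and event_id, subsys, reg_offset, val, done_cb and
 * priv are placeholder values supplied by the client.
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_wfe(pkt, event_id, true);
 *	cmdq_pkt_write_mask(pkt, subsys, reg_offset, val, GENMASK(3, 0));
 *	cmdq_pkt_finalize(pkt);
 *	cmdq_pkt_flush_async(pkt, done_cb, priv);
 */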

MODULE_LICENSE("GPL v2");