// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/debugfs.h>
#include <linux/bitfield.h>

#include "dw-edma-v0-debugfs.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"
/*
 * REGS_ADDR() - kernel-virtual address of a global eDMA v0 register.
 * @dw:   eDMA device descriptor (provides the mapped register base)
 * @name: member designator within struct dw_edma_v0_regs
 *
 * Evaluates (statement expression) to a void __iomem * pointing at the
 * named register inside the chip's mapped register space.
 */
#define REGS_ADDR(dw, name)						\
({									\
	struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base;	\
									\
	(void __iomem *)&__regs->name;					\
})
/*
 * REGS_CH_ADDR() - kernel-virtual address of a per-channel eDMA register.
 * @dw:   eDMA device descriptor
 * @name: member designator within struct dw_edma_v0_ch_regs
 * @_dir: transfer direction (EDMA_DIR_READ or EDMA_DIR_WRITE)
 * @_ch:  channel index
 *
 * In legacy mode every channel is accessed through the single shared
 * type.legacy.ch window (the actual channel is selected at read time via
 * the viewport register); in unrolled mode each channel/direction pair
 * has its own dedicated register block.
 */
#define REGS_CH_ADDR(dw, name, _dir, _ch)				\
({									\
	struct dw_edma_v0_ch_regs __iomem *__ch_regs;			\
									\
	if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY)			\
		__ch_regs = REGS_ADDR(dw, type.legacy.ch);		\
	else if (_dir == EDMA_DIR_READ)					\
		__ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd);	\
	else								\
		__ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr);	\
									\
	(void __iomem *)&__ch_regs->name;				\
})
/* debugfs entry initializer for a global register (no channel context) */
#define REGISTER(dw, name)						\
	{ dw, #name, REGS_ADDR(dw, name) }
/*
 * debugfs entry initializer for a per-channel register; carries the
 * direction/channel so the read handler can program the legacy viewport.
 */
#define CTX_REGISTER(dw, name, dir, ch)					\
	{ dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch }
/* Initializers for the write/read halves of the global register set */
#define WR_REGISTER(dw, name)						\
	{ dw, #name, REGS_ADDR(dw, wr_##name) }
#define RD_REGISTER(dw, name)						\
	{ dw, #name, REGS_ADDR(dw, rd_##name) }
2019-06-04 15:29:24 +02:00
2023-01-13 20:13:58 +03:00
# define WR_REGISTER_LEGACY(dw, name) \
{ dw , # name , REGS_ADDR ( dw , type . legacy . wr_ # # name ) }
2019-06-04 15:29:24 +02:00
# define RD_REGISTER_LEGACY(name) \
2023-01-13 20:13:58 +03:00
{ dw , # name , REGS_ADDR ( dw , type . legacy . rd_ # # name ) }
/* Initializers for the unrolled-mode write/read global registers */
#define WR_REGISTER_UNROLL(dw, name)					\
	{ dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) }
#define RD_REGISTER_UNROLL(dw, name)					\
	{ dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) }
2019-06-04 15:29:24 +02:00
# define WRITE_STR "write"
# define READ_STR "read"
# define CHANNEL_STR "channel"
# define REGISTERS_STR "registers"
/**
 * struct dw_edma_debugfs_entry - backing data for one debugfs register file
 * @dw:   owning eDMA device
 * @name: register name shown as the debugfs file name
 * @reg:  mapped register address the file reads
 * @dir:  channel direction; consulted only for legacy-mode viewport setup
 * @ch:   channel index; consulted only for legacy-mode viewport setup
 */
struct dw_edma_debugfs_entry {
	struct dw_edma				*dw;
	const char				*name;
	void __iomem				*reg;
	enum dw_edma_dir			dir;
	u16					ch;
};
/*
 * debugfs read callback: fetch the 32-bit register behind @data.
 *
 * For legacy-mode per-channel registers (anything at or above the shared
 * type.legacy.ch window) the channel/direction must first be latched into
 * the viewport register, so the select+read pair is done under dw->lock.
 * Global registers are read directly.
 */
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
	struct dw_edma_debugfs_entry *entry = data;
	struct dw_edma *dw = entry->dw;
	void __iomem *addr = entry->reg;
	unsigned long flags;
	u32 viewport_sel;

	if (dw->chip->mf != EDMA_MF_EDMA_LEGACY ||
	    addr < REGS_ADDR(dw, type.legacy.ch)) {
		/* Directly addressable register, no viewport needed */
		*val = readl(addr);
		return 0;
	}

	/* BIT(31) selects the read direction, low bits select the channel */
	viewport_sel = entry->dir == EDMA_DIR_READ ? BIT(31) : 0;
	viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch);

	raw_spin_lock_irqsave(&dw->lock, flags);

	writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel));
	*val = readl(addr);

	raw_spin_unlock_irqrestore(&dw->lock, flags);

	return 0;
}
/* Read-only debugfs attribute rendering the register value as 0x%08llx */
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");
2023-01-13 20:13:58 +03:00
static void dw_edma_debugfs_create_x32 ( struct dw_edma * dw ,
const struct dw_edma_debugfs_entry ini [ ] ,
2023-01-13 20:13:56 +03:00
int nr_entries , struct dentry * dent )
2019-06-04 15:29:24 +02:00
{
2023-01-13 20:13:55 +03:00
struct dw_edma_debugfs_entry * entries ;
2019-06-04 15:29:24 +02:00
int i ;
2023-01-13 20:13:55 +03:00
entries = devm_kcalloc ( dw - > chip - > dev , nr_entries , sizeof ( * entries ) ,
GFP_KERNEL ) ;
if ( ! entries )
return ;
2019-06-04 15:29:24 +02:00
for ( i = 0 ; i < nr_entries ; i + + ) {
2023-01-13 20:13:55 +03:00
entries [ i ] = ini [ i ] ;
2023-01-13 20:13:56 +03:00
debugfs_create_file_unsafe ( entries [ i ] . name , 0444 , dent ,
2023-01-13 20:13:55 +03:00
& entries [ i ] , & fops_x32 ) ;
2019-06-04 15:29:24 +02:00
}
}
2023-01-13 20:13:58 +03:00
static void dw_edma_debugfs_regs_ch ( struct dw_edma * dw , enum dw_edma_dir dir ,
u16 ch , struct dentry * dent )
2019-06-04 15:29:24 +02:00
{
2023-01-13 20:13:57 +03:00
struct dw_edma_debugfs_entry debugfs_regs [ ] = {
2023-01-13 20:13:58 +03:00
CTX_REGISTER ( dw , ch_control1 , dir , ch ) ,
CTX_REGISTER ( dw , ch_control2 , dir , ch ) ,
CTX_REGISTER ( dw , transfer_size , dir , ch ) ,
CTX_REGISTER ( dw , sar . lsb , dir , ch ) ,
CTX_REGISTER ( dw , sar . msb , dir , ch ) ,
CTX_REGISTER ( dw , dar . lsb , dir , ch ) ,
CTX_REGISTER ( dw , dar . msb , dir , ch ) ,
CTX_REGISTER ( dw , llp . lsb , dir , ch ) ,
CTX_REGISTER ( dw , llp . msb , dir , ch ) ,
2019-06-04 15:29:24 +02:00
} ;
2023-01-13 20:13:54 +03:00
int nr_entries ;
2019-06-04 15:29:24 +02:00
nr_entries = ARRAY_SIZE ( debugfs_regs ) ;
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_create_x32 ( dw , debugfs_regs , nr_entries , dent ) ;
2019-06-04 15:29:24 +02:00
}
2023-01-13 20:13:58 +03:00
static noinline_for_stack void
dw_edma_debugfs_regs_wr ( struct dw_edma * dw , struct dentry * dent )
2019-06-04 15:29:24 +02:00
{
2023-01-13 20:13:54 +03:00
const struct dw_edma_debugfs_entry debugfs_regs [ ] = {
2019-06-04 15:29:24 +02:00
/* eDMA global registers */
2023-01-13 20:13:58 +03:00
WR_REGISTER ( dw , engine_en ) ,
WR_REGISTER ( dw , doorbell ) ,
WR_REGISTER ( dw , ch_arb_weight . lsb ) ,
WR_REGISTER ( dw , ch_arb_weight . msb ) ,
2019-06-04 15:29:24 +02:00
/* eDMA interrupts registers */
2023-01-13 20:13:58 +03:00
WR_REGISTER ( dw , int_status ) ,
WR_REGISTER ( dw , int_mask ) ,
WR_REGISTER ( dw , int_clear ) ,
WR_REGISTER ( dw , err_status ) ,
WR_REGISTER ( dw , done_imwr . lsb ) ,
WR_REGISTER ( dw , done_imwr . msb ) ,
WR_REGISTER ( dw , abort_imwr . lsb ) ,
WR_REGISTER ( dw , abort_imwr . msb ) ,
WR_REGISTER ( dw , ch01_imwr_data ) ,
WR_REGISTER ( dw , ch23_imwr_data ) ,
WR_REGISTER ( dw , ch45_imwr_data ) ,
WR_REGISTER ( dw , ch67_imwr_data ) ,
WR_REGISTER ( dw , linked_list_err_en ) ,
2019-06-04 15:29:24 +02:00
} ;
2023-01-13 20:13:54 +03:00
const struct dw_edma_debugfs_entry debugfs_unroll_regs [ ] = {
2019-06-04 15:29:24 +02:00
/* eDMA channel context grouping */
2023-01-13 20:13:58 +03:00
WR_REGISTER_UNROLL ( dw , engine_chgroup ) ,
WR_REGISTER_UNROLL ( dw , engine_hshake_cnt . lsb ) ,
WR_REGISTER_UNROLL ( dw , engine_hshake_cnt . msb ) ,
WR_REGISTER_UNROLL ( dw , ch0_pwr_en ) ,
WR_REGISTER_UNROLL ( dw , ch1_pwr_en ) ,
WR_REGISTER_UNROLL ( dw , ch2_pwr_en ) ,
WR_REGISTER_UNROLL ( dw , ch3_pwr_en ) ,
WR_REGISTER_UNROLL ( dw , ch4_pwr_en ) ,
WR_REGISTER_UNROLL ( dw , ch5_pwr_en ) ,
WR_REGISTER_UNROLL ( dw , ch6_pwr_en ) ,
WR_REGISTER_UNROLL ( dw , ch7_pwr_en ) ,
2019-06-04 15:29:24 +02:00
} ;
2023-01-13 20:13:56 +03:00
struct dentry * regs_dent , * ch_dent ;
2019-06-04 15:29:24 +02:00
int nr_entries , i ;
char name [ 16 ] ;
2023-01-13 20:13:56 +03:00
regs_dent = debugfs_create_dir ( WRITE_STR , dent ) ;
2019-06-04 15:29:24 +02:00
nr_entries = ARRAY_SIZE ( debugfs_regs ) ;
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_create_x32 ( dw , debugfs_regs , nr_entries , regs_dent ) ;
2019-06-04 15:29:24 +02:00
2022-05-24 10:21:53 -05:00
if ( dw - > chip - > mf = = EDMA_MF_HDMA_COMPAT ) {
2019-06-04 15:29:24 +02:00
nr_entries = ARRAY_SIZE ( debugfs_unroll_regs ) ;
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_create_x32 ( dw , debugfs_unroll_regs , nr_entries ,
2023-01-13 20:13:56 +03:00
regs_dent ) ;
2019-06-04 15:29:24 +02:00
}
for ( i = 0 ; i < dw - > wr_ch_cnt ; i + + ) {
snprintf ( name , sizeof ( name ) , " %s:%d " , CHANNEL_STR , i ) ;
2023-01-13 20:13:56 +03:00
ch_dent = debugfs_create_dir ( name , regs_dent ) ;
2019-06-04 15:29:24 +02:00
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_regs_ch ( dw , EDMA_DIR_WRITE , i , ch_dent ) ;
2019-06-04 15:29:24 +02:00
}
}
2023-01-13 20:13:58 +03:00
static noinline_for_stack void dw_edma_debugfs_regs_rd ( struct dw_edma * dw ,
struct dentry * dent )
2019-06-04 15:29:24 +02:00
{
2023-01-13 20:13:54 +03:00
const struct dw_edma_debugfs_entry debugfs_regs [ ] = {
2019-06-04 15:29:24 +02:00
/* eDMA global registers */
2023-01-13 20:13:58 +03:00
RD_REGISTER ( dw , engine_en ) ,
RD_REGISTER ( dw , doorbell ) ,
RD_REGISTER ( dw , ch_arb_weight . lsb ) ,
RD_REGISTER ( dw , ch_arb_weight . msb ) ,
2019-06-04 15:29:24 +02:00
/* eDMA interrupts registers */
2023-01-13 20:13:58 +03:00
RD_REGISTER ( dw , int_status ) ,
RD_REGISTER ( dw , int_mask ) ,
RD_REGISTER ( dw , int_clear ) ,
RD_REGISTER ( dw , err_status . lsb ) ,
RD_REGISTER ( dw , err_status . msb ) ,
RD_REGISTER ( dw , linked_list_err_en ) ,
RD_REGISTER ( dw , done_imwr . lsb ) ,
RD_REGISTER ( dw , done_imwr . msb ) ,
RD_REGISTER ( dw , abort_imwr . lsb ) ,
RD_REGISTER ( dw , abort_imwr . msb ) ,
RD_REGISTER ( dw , ch01_imwr_data ) ,
RD_REGISTER ( dw , ch23_imwr_data ) ,
RD_REGISTER ( dw , ch45_imwr_data ) ,
RD_REGISTER ( dw , ch67_imwr_data ) ,
2019-06-04 15:29:24 +02:00
} ;
2023-01-13 20:13:54 +03:00
const struct dw_edma_debugfs_entry debugfs_unroll_regs [ ] = {
2019-06-04 15:29:24 +02:00
/* eDMA channel context grouping */
2023-01-13 20:13:58 +03:00
RD_REGISTER_UNROLL ( dw , engine_chgroup ) ,
RD_REGISTER_UNROLL ( dw , engine_hshake_cnt . lsb ) ,
RD_REGISTER_UNROLL ( dw , engine_hshake_cnt . msb ) ,
RD_REGISTER_UNROLL ( dw , ch0_pwr_en ) ,
RD_REGISTER_UNROLL ( dw , ch1_pwr_en ) ,
RD_REGISTER_UNROLL ( dw , ch2_pwr_en ) ,
RD_REGISTER_UNROLL ( dw , ch3_pwr_en ) ,
RD_REGISTER_UNROLL ( dw , ch4_pwr_en ) ,
RD_REGISTER_UNROLL ( dw , ch5_pwr_en ) ,
RD_REGISTER_UNROLL ( dw , ch6_pwr_en ) ,
RD_REGISTER_UNROLL ( dw , ch7_pwr_en ) ,
2019-06-04 15:29:24 +02:00
} ;
2023-01-13 20:13:56 +03:00
struct dentry * regs_dent , * ch_dent ;
2019-06-04 15:29:24 +02:00
int nr_entries , i ;
char name [ 16 ] ;
2023-01-13 20:13:56 +03:00
regs_dent = debugfs_create_dir ( READ_STR , dent ) ;
2019-06-04 15:29:24 +02:00
nr_entries = ARRAY_SIZE ( debugfs_regs ) ;
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_create_x32 ( dw , debugfs_regs , nr_entries , regs_dent ) ;
2019-06-04 15:29:24 +02:00
2022-05-24 10:21:53 -05:00
if ( dw - > chip - > mf = = EDMA_MF_HDMA_COMPAT ) {
2019-06-04 15:29:24 +02:00
nr_entries = ARRAY_SIZE ( debugfs_unroll_regs ) ;
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_create_x32 ( dw , debugfs_unroll_regs , nr_entries ,
2023-01-13 20:13:56 +03:00
regs_dent ) ;
2019-06-04 15:29:24 +02:00
}
for ( i = 0 ; i < dw - > rd_ch_cnt ; i + + ) {
snprintf ( name , sizeof ( name ) , " %s:%d " , CHANNEL_STR , i ) ;
2023-01-13 20:13:56 +03:00
ch_dent = debugfs_create_dir ( name , regs_dent ) ;
2019-06-04 15:29:24 +02:00
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_regs_ch ( dw , EDMA_DIR_READ , i , ch_dent ) ;
2019-06-04 15:29:24 +02:00
}
}
2023-01-13 20:13:58 +03:00
static void dw_edma_debugfs_regs ( struct dw_edma * dw )
2019-06-04 15:29:24 +02:00
{
2023-01-13 20:13:54 +03:00
const struct dw_edma_debugfs_entry debugfs_regs [ ] = {
2023-01-13 20:13:58 +03:00
REGISTER ( dw , ctrl_data_arb_prior ) ,
REGISTER ( dw , ctrl ) ,
2019-06-04 15:29:24 +02:00
} ;
2023-01-13 20:13:56 +03:00
struct dentry * regs_dent ;
2019-06-04 15:29:24 +02:00
int nr_entries ;
2023-01-13 20:14:00 +03:00
regs_dent = debugfs_create_dir ( REGISTERS_STR , dw - > dma . dbg_dev_root ) ;
2019-06-04 15:29:24 +02:00
nr_entries = ARRAY_SIZE ( debugfs_regs ) ;
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_create_x32 ( dw , debugfs_regs , nr_entries , regs_dent ) ;
2019-06-04 15:29:24 +02:00
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_regs_wr ( dw , regs_dent ) ;
dw_edma_debugfs_regs_rd ( dw , regs_dent ) ;
2019-06-04 15:29:24 +02:00
}
2023-01-13 20:13:58 +03:00
void dw_edma_v0_debugfs_on ( struct dw_edma * dw )
2019-06-04 15:29:24 +02:00
{
2023-01-13 20:13:53 +03:00
if ( ! debugfs_initialized ( ) )
return ;
2023-01-13 20:14:00 +03:00
debugfs_create_u32 ( " mf " , 0444 , dw - > dma . dbg_dev_root , & dw - > chip - > mf ) ;
debugfs_create_u16 ( " wr_ch_cnt " , 0444 , dw - > dma . dbg_dev_root , & dw - > wr_ch_cnt ) ;
debugfs_create_u16 ( " rd_ch_cnt " , 0444 , dw - > dma . dbg_dev_root , & dw - > rd_ch_cnt ) ;
2019-06-04 15:29:24 +02:00
2023-01-13 20:13:58 +03:00
dw_edma_debugfs_regs ( dw ) ;
2019-06-04 15:29:24 +02:00
}