// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/bitfield.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-v0-debugfs.h"
enum dw_edma_control {
	DW_EDMA_V0_CB		= BIT(0),
	DW_EDMA_V0_TCB		= BIT(1),
	DW_EDMA_V0_LLP		= BIT(2),
	DW_EDMA_V0_LIE		= BIT(3),
	DW_EDMA_V0_RIE		= BIT(4),
	DW_EDMA_V0_CCS		= BIT(8),
	DW_EDMA_V0_LLE		= BIT(9),
};

static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return dw->chip->reg_base;
}
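
/*
 * Global register accessors. The RW variants pick the write- or the
 * read-channel register bank based on the transfer direction; the BOTH
 * variants program both banks.
 */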
#define SET_32(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET_32(dw, name)				\
	readl(&(__dw_regs(dw)->name))

#define SET_RW_32(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_32(dw, wr_##name, value);	\
		else					\
			SET_32(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_32(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET_32(dw, wr_##name)			\
	  : GET_32(dw, rd_##name))

#define SET_BOTH_32(dw, name, value)			\
	do {						\
		SET_32(dw, wr_##name, value);		\
		SET_32(dw, rd_##name, value);		\
	} while (0)
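
/* Native 64-bit MMIO accessors, only available on 64-bit builds */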
#ifdef CONFIG_64BIT

#define SET_64(dw, name, value)				\
	writeq(value, &(__dw_regs(dw)->name))

#define GET_64(dw, name)				\
	readq(&(__dw_regs(dw)->name))

#define SET_RW_64(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_64(dw, wr_##name, value);	\
		else					\
			SET_64(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_64(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET_64(dw, wr_##name)			\
	  : GET_64(dw, rd_##name))

#define SET_BOTH_64(dw, name, value)			\
	do {						\
		SET_64(dw, wr_##name, value);		\
		SET_64(dw, rd_##name, value);		\
	} while (0)

#endif /* CONFIG_64BIT */
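
/* Accessors for the HDMA-compatibility register layout (EDMA_MF_HDMA_COMPAT) */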
#define SET_COMPAT(dw, name, value)			\
	writel(value, &(__dw_regs(dw)->type.unroll.name))

#define SET_RW_COMPAT(dw, dir, name, value)		\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_COMPAT(dw, wr_##name, value); \
		else					\
			SET_COMPAT(dw, rd_##name, value); \
	} while (0)
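
/*
 * In the legacy register map a single channel register window is
 * multiplexed through the viewport; in the unrolled map every channel
 * has its own register block.
 */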
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
		return &(__dw_regs(dw)->type.legacy.ch);

	if (dir == EDMA_DIR_WRITE)
		return &__dw_regs(dw)->type.unroll.ch[ch].wr;

	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}
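
/*
 * Legacy-mode channel accesses must first select the target channel in
 * the viewport register, so the select/access pair is serialized under
 * the spinlock.
 */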
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writel(value, addr);
	}
}

static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readl(addr);
	}

	return value;
}
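
/* Per-channel register accessors; legacy mode routes through the viewport */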
#define SET_CH_32(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_32(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

#define SET_LL_32(ll, value) \
	writel(value, ll)

#ifdef CONFIG_64BIT

static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u64 value, void __iomem *addr)
{
	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writeq(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writeq(value, addr);
	}
}

static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u64 value;

	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readq(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readq(addr);
	}

	return value;
}

#define SET_CH_64(dw, dir, ch, name, value) \
	writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_64(dw, dir, ch, name) \
	readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

#define SET_LL_64(ll, value) \
	writeq(value, ll)

#endif /* CONFIG_64BIT */

/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
	SET_BOTH_32(dw, int_mask,
		    EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH_32(dw, int_clear,
		    EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH_32(dw, engine_en, 0);
}

u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
	u32 num_ch;

	if (dir == EDMA_DIR_WRITE)
		num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK,
				   GET_32(dw, ctrl));
	else
		num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK,
				   GET_32(dw, ctrl));

	if (num_ch > EDMA_V0_MAX_NR_CH)
		num_ch = EDMA_V0_MAX_NR_CH;

	return (u16)num_ch;
}

enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
			GET_CH_32(dw, chan->dir, chan->id, ch_control1));

	if (tmp == 1)
		return DMA_IN_PROGRESS;
	else if (tmp == 3)
		return DMA_COMPLETE;
	else
		return DMA_ERROR;
}

void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}

void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}

u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}

u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}
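
/*
 * Lay out one chunk's bursts as hardware linked-list elements (LLIs) in
 * the link-list region, terminated by an LLP element pointing back at
 * the start of the region.
 */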
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child;
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma_v0_lli __iomem *lli;
	struct dw_edma_v0_llp __iomem *llp;
	u32 control = 0, i = 0;
	int j;

	lli = chunk->ll_region.vaddr;

	if (chunk->cb)
		control = DW_EDMA_V0_CB;

	j = chunk->bursts_alloc;
	list_for_each_entry(child, &chunk->burst->list, list) {
		j--;
		if (!j) {
			control |= DW_EDMA_V0_LIE;
			if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
				control |= DW_EDMA_V0_RIE;
		}
		/* Channel control */
		SET_LL_32(&lli[i].control, control);
		/* Transfer size */
		SET_LL_32(&lli[i].transfer_size, child->sz);
		/* SAR */
		#ifdef CONFIG_64BIT
			SET_LL_64(&lli[i].sar.reg, child->sar);
		#else /* CONFIG_64BIT */
			SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
			SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
		#endif /* CONFIG_64BIT */
		/* DAR */
		#ifdef CONFIG_64BIT
			SET_LL_64(&lli[i].dar.reg, child->dar);
		#else /* CONFIG_64BIT */
			SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
			SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
		#endif /* CONFIG_64BIT */
		i++;
	}

	llp = (void __iomem *)&lli[i];
	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
	if (!chunk->cb)
		control |= DW_EDMA_V0_CB;

	/* Channel control */
	SET_LL_32(&llp->control, control);
	/* Linked list */
	#ifdef CONFIG_64BIT
		SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
	#else /* CONFIG_64BIT */
		SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
		SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
	#endif /* CONFIG_64BIT */
}
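
/*
 * On the first chunk, enable the engine and unmask done/abort interrupts
 * before pointing the channel at the linked list; every chunk finishes
 * with a doorbell write that kicks off the transfer.
 */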
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	dw_edma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_RW_32(dw, chan->dir, engine_en, BIT(0));
		if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
			switch (chan->id) {
			case 0:
				SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
					      BIT(0));
				break;
			case 1:
				SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
					      BIT(0));
				break;
			case 2:
				SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
					      BIT(0));
				break;
			case 3:
				SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
					      BIT(0));
				break;
			case 4:
				SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
					      BIT(0));
				break;
			case 5:
				SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
					      BIT(0));
				break;
			case 6:
				SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
					      BIT(0));
				break;
			case 7:
				SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
					      BIT(0));
				break;
			}
		}
		/* Interrupt unmask - done, abort */
		tmp = GET_RW_32(dw, chan->dir, int_mask);
		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, int_mask, tmp);
		/* Linked list error */
		tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
		/* Channel control */
		SET_CH_32(dw, chan->dir, chan->id, ch_control1,
			  (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
		/* Linked list */
		/* llp is not aligned on 64bit -> keep 32bit accesses */
		SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
			  lower_32_bits(chunk->ll_region.paddr));
		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
			  upper_32_bits(chunk->ll_region.paddr));
	}
	/* Doorbell */
	SET_RW_32(dw, chan->dir, doorbell,
		  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
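
/*
 * Program the IMWR (MSI) done/abort addresses and the per-channel IMWR
 * data. Two channels share each 32-bit imwr_data register: odd channels
 * use the upper half, even channels the lower half.
 */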
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	u32 tmp = 0;

	/* MSI done addr - low, high */
	SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi);

	/* MSI abort addr - low, high */
	SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi);

	/* MSI data - low, high */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data);
		break;
	}

	if (chan->id & BIT(0)) {
		/* Channel odd {1, 3, 5, 7} */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Channel even {0, 2, 4, 6} */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	switch (chan->id) {
	case 0:
	case 1:
		SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}

	return 0;
}

/* eDMA debugfs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
	dw_edma_v0_debugfs_on(dw);
}

void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
{
	dw_edma_v0_debugfs_off(dw);
}