// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
2023-03-23 13:55:02 +01:00
# define TILE_FUSE_ENABLE_BOTH 0x0
2024-02-14 09:12:58 +01:00
# define TILE_SKU_BOTH 0x3630
2023-01-17 10:27:17 +01:00
/* Work point configuration values */
2023-03-23 13:55:02 +01:00
# define CONFIG_1_TILE 0x01
# define CONFIG_2_TILE 0x02
# define PLL_RATIO_5_3 0x01
# define PLL_RATIO_4_3 0x02
# define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
# define WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
# define WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
# define WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
# define WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
# define WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
2023-01-17 10:27:17 +01:00
# define PLL_REF_CLK_FREQ (50 * 1000000)
# define PLL_SIMULATION_FREQ (10 * 1000000)
2023-10-28 17:59:29 +02:00
# define PLL_PROF_CLK_FREQ (38400 * 1000)
2023-01-17 10:27:17 +01:00
# define PLL_DEFAULT_EPP_VALUE 0x80
# define TIM_SAFE_ENABLE 0xf1d0dead
# define TIM_WATCHDOG_RESET_VALUE 0xffffffff
# define TIMEOUT_US (150 * USEC_PER_MSEC)
# define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
# define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
2023-10-28 15:34:15 +02:00
# define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
# define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
( REG_FLD ( VPU_37XX_HOST_SS_ICB_STATUS_0 , MMU_IRQ_0_INT ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_ICB_STATUS_0 , MMU_IRQ_1_INT ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_ICB_STATUS_0 , MMU_IRQ_2_INT ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_ICB_STATUS_0 , NOC_FIREWALL_INT ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_ICB_STATUS_0 , CPU_INT_REDIRECT_0_INT ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_ICB_STATUS_0 , CPU_INT_REDIRECT_1_INT ) ) )
# define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
( REG_FLD ( VPU_37XX_HOST_SS_ICB_STATUS_1 , CPU_INT_REDIRECT_3_INT ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_ICB_STATUS_1 , CPU_INT_REDIRECT_4_INT ) ) )
2023-01-17 10:27:17 +01:00
# define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
2023-12-04 13:23:31 +01:00
# define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
2023-07-31 18:12:54 +02:00
( REG_FLD ( VPU_37XX_BUTTRESS_INTERRUPT_STAT , UFI_ERR ) ) )
2023-01-17 10:27:17 +01:00
2023-12-04 13:23:31 +01:00
# define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
( REG_FLD ( VPU_37XX_BUTTRESS_INTERRUPT_STAT , FREQ_CHANGE ) ) )
2023-01-17 10:27:17 +01:00
# define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
# define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
2023-07-31 18:12:54 +02:00
# define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
( REG_FLD ( VPU_37XX_HOST_SS_FW_SOC_IRQ_EN , CSS_DBG ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_FW_SOC_IRQ_EN , CSS_CTRL ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_FW_SOC_IRQ_EN , DEC400 ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_FW_SOC_IRQ_EN , MSS_NCE ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_FW_SOC_IRQ_EN , MSS_MBI ) ) | \
( REG_FLD ( VPU_37XX_HOST_SS_FW_SOC_IRQ_EN , MSS_MBI_CMX ) ) )
2023-01-17 10:27:17 +01:00
static void ivpu_hw_wa_init ( struct ivpu_device * vdev )
{
2023-10-20 12:45:01 +02:00
vdev - > wa . punit_disabled = false ;
2023-01-17 10:27:17 +01:00
vdev - > wa . clear_runtime_mem = false ;
2023-07-03 10:07:25 +02:00
2023-12-04 13:23:31 +01:00
REGB_WR32 ( VPU_37XX_BUTTRESS_INTERRUPT_STAT , BUTTRESS_ALL_IRQ_MASK ) ;
if ( REGB_RD32 ( VPU_37XX_BUTTRESS_INTERRUPT_STAT ) = = BUTTRESS_ALL_IRQ_MASK ) {
/* Writing 1s does not clear the interrupt status register */
2023-07-03 10:07:25 +02:00
vdev - > wa . interrupt_clear_with_0 = true ;
2023-12-04 13:23:31 +01:00
REGB_WR32 ( VPU_37XX_BUTTRESS_INTERRUPT_STAT , 0x0 ) ;
}
2023-09-01 11:49:51 +02:00
IVPU_PRINT_WA ( punit_disabled ) ;
IVPU_PRINT_WA ( clear_runtime_mem ) ;
IVPU_PRINT_WA ( interrupt_clear_with_0 ) ;
2023-01-17 10:27:17 +01:00
}
static void ivpu_hw_timeouts_init ( struct ivpu_device * vdev )
{
2023-10-20 12:45:01 +02:00
vdev - > timeout . boot = 1000 ;
vdev - > timeout . jsm = 500 ;
vdev - > timeout . tdr = 2000 ;
vdev - > timeout . reschedule_suspend = 10 ;
vdev - > timeout . autosuspend = 10 ;
2023-10-28 15:34:15 +02:00
vdev - > timeout . d0i3_entry_msg = 5 ;
2023-01-17 10:27:17 +01:00
}
static int ivpu_pll_wait_for_cmd_send ( struct ivpu_device * vdev )
{
2023-07-31 18:12:54 +02:00
return REGB_POLL_FLD ( VPU_37XX_BUTTRESS_WP_REQ_CMD , SEND , 0 , PLL_TIMEOUT_US ) ;
2023-01-17 10:27:17 +01:00
}
/* Send KMD initiated workpoint change */
static int ivpu_pll_cmd_send ( struct ivpu_device * vdev , u16 min_ratio , u16 max_ratio ,
u16 target_ratio , u16 config )
{
int ret ;
u32 val ;
ret = ivpu_pll_wait_for_cmd_send ( vdev ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed to sync before WP request: %d \n " , ret ) ;
return ret ;
}
2023-07-31 18:12:54 +02:00
val = REGB_RD32 ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 ) ;
val = REG_SET_FLD_NUM ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 , MIN_RATIO , min_ratio , val ) ;
val = REG_SET_FLD_NUM ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 , MAX_RATIO , max_ratio , val ) ;
REGB_WR32 ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
val = REGB_RD32 ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 ) ;
val = REG_SET_FLD_NUM ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 , TARGET_RATIO , target_ratio , val ) ;
val = REG_SET_FLD_NUM ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 , EPP , PLL_DEFAULT_EPP_VALUE , val ) ;
REGB_WR32 ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
val = REGB_RD32 ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 ) ;
val = REG_SET_FLD_NUM ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 , CONFIG , config , val ) ;
REGB_WR32 ( VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
val = REGB_RD32 ( VPU_37XX_BUTTRESS_WP_REQ_CMD ) ;
val = REG_SET_FLD ( VPU_37XX_BUTTRESS_WP_REQ_CMD , SEND , val ) ;
REGB_WR32 ( VPU_37XX_BUTTRESS_WP_REQ_CMD , val ) ;
2023-01-17 10:27:17 +01:00
ret = ivpu_pll_wait_for_cmd_send ( vdev ) ;
if ( ret )
ivpu_err ( vdev , " Failed to sync after WP request: %d \n " , ret ) ;
return ret ;
}
static int ivpu_pll_wait_for_lock ( struct ivpu_device * vdev , bool enable )
{
u32 exp_val = enable ? 0x1 : 0x0 ;
if ( IVPU_WA ( punit_disabled ) )
return 0 ;
2023-07-31 18:12:54 +02:00
return REGB_POLL_FLD ( VPU_37XX_BUTTRESS_PLL_STATUS , LOCK , exp_val , PLL_TIMEOUT_US ) ;
2023-01-17 10:27:17 +01:00
}
static int ivpu_pll_wait_for_status_ready ( struct ivpu_device * vdev )
{
if ( IVPU_WA ( punit_disabled ) )
return 0 ;
2023-07-31 18:12:54 +02:00
return REGB_POLL_FLD ( VPU_37XX_BUTTRESS_VPU_STATUS , READY , 1 , PLL_TIMEOUT_US ) ;
2023-01-17 10:27:17 +01:00
}
static void ivpu_pll_init_frequency_ratios ( struct ivpu_device * vdev )
{
struct ivpu_hw_info * hw = vdev - > hw ;
u8 fuse_min_ratio , fuse_max_ratio , fuse_pn_ratio ;
u32 fmin_fuse , fmax_fuse ;
2023-07-31 18:12:54 +02:00
fmin_fuse = REGB_RD32 ( VPU_37XX_BUTTRESS_FMIN_FUSE ) ;
fuse_min_ratio = REG_GET_FLD ( VPU_37XX_BUTTRESS_FMIN_FUSE , MIN_RATIO , fmin_fuse ) ;
fuse_pn_ratio = REG_GET_FLD ( VPU_37XX_BUTTRESS_FMIN_FUSE , PN_RATIO , fmin_fuse ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
fmax_fuse = REGB_RD32 ( VPU_37XX_BUTTRESS_FMAX_FUSE ) ;
fuse_max_ratio = REG_GET_FLD ( VPU_37XX_BUTTRESS_FMAX_FUSE , MAX_RATIO , fmax_fuse ) ;
2023-01-17 10:27:17 +01:00
hw - > pll . min_ratio = clamp_t ( u8 , ivpu_pll_min_ratio , fuse_min_ratio , fuse_max_ratio ) ;
hw - > pll . max_ratio = clamp_t ( u8 , ivpu_pll_max_ratio , hw - > pll . min_ratio , fuse_max_ratio ) ;
hw - > pll . pn_ratio = clamp_t ( u8 , fuse_pn_ratio , hw - > pll . min_ratio , hw - > pll . max_ratio ) ;
}
2023-07-31 18:12:54 +02:00
static int ivpu_hw_37xx_wait_for_vpuip_bar ( struct ivpu_device * vdev )
2023-06-07 11:45:02 +02:00
{
2023-07-31 18:12:54 +02:00
return REGV_POLL_FLD ( VPU_37XX_HOST_SS_CPR_RST_CLR , AON , 0 , 100 ) ;
2023-06-07 11:45:02 +02:00
}
2023-01-17 10:27:17 +01:00
static int ivpu_pll_drive ( struct ivpu_device * vdev , bool enable )
{
struct ivpu_hw_info * hw = vdev - > hw ;
u16 target_ratio ;
u16 config ;
int ret ;
if ( IVPU_WA ( punit_disabled ) ) {
2023-10-20 12:45:01 +02:00
ivpu_dbg ( vdev , PM , " Skipping PLL request \n " ) ;
2023-01-17 10:27:17 +01:00
return 0 ;
}
if ( enable ) {
target_ratio = hw - > pll . pn_ratio ;
config = hw - > config ;
} else {
target_ratio = 0 ;
config = 0 ;
}
2023-03-23 13:55:02 +01:00
ivpu_dbg ( vdev , PM , " PLL workpoint request: config 0x%04x pll ratio 0x%x \n " ,
config , target_ratio ) ;
2023-01-17 10:27:17 +01:00
ret = ivpu_pll_cmd_send ( vdev , hw - > pll . min_ratio , hw - > pll . max_ratio , target_ratio , config ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed to send PLL workpoint request: %d \n " , ret ) ;
return ret ;
}
ret = ivpu_pll_wait_for_lock ( vdev , enable ) ;
if ( ret ) {
ivpu_err ( vdev , " Timed out waiting for PLL lock \n " ) ;
return ret ;
}
if ( enable ) {
ret = ivpu_pll_wait_for_status_ready ( vdev ) ;
if ( ret ) {
ivpu_err ( vdev , " Timed out waiting for PLL ready status \n " ) ;
return ret ;
}
2023-06-07 11:45:02 +02:00
2023-07-31 18:12:54 +02:00
ret = ivpu_hw_37xx_wait_for_vpuip_bar ( vdev ) ;
2023-06-07 11:45:02 +02:00
if ( ret ) {
2024-02-14 09:13:05 +01:00
ivpu_err ( vdev , " Timed out waiting for NPU IP bar \n " ) ;
2023-06-07 11:45:02 +02:00
return ret ;
}
2023-01-17 10:27:17 +01:00
}
return 0 ;
}
static int ivpu_pll_enable ( struct ivpu_device * vdev )
{
return ivpu_pll_drive ( vdev , true ) ;
}
static int ivpu_pll_disable ( struct ivpu_device * vdev )
{
return ivpu_pll_drive ( vdev , false ) ;
}
static void ivpu_boot_host_ss_rst_clr_assert ( struct ivpu_device * vdev )
{
2023-06-07 11:45:02 +02:00
u32 val = 0 ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_RST_CLR , TOP_NOC , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_RST_CLR , DSS_MAS , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_RST_CLR , MSS_MAS , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_CPR_RST_CLR , val ) ;
2023-01-17 10:27:17 +01:00
}
static void ivpu_boot_host_ss_rst_drive ( struct ivpu_device * vdev , bool enable )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_CPR_RST_SET ) ;
2023-01-17 10:27:17 +01:00
if ( enable ) {
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_RST_SET , TOP_NOC , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_RST_SET , DSS_MAS , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_RST_SET , MSS_MAS , val ) ;
2023-01-17 10:27:17 +01:00
} else {
2023-07-31 18:12:54 +02:00
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_CPR_RST_SET , TOP_NOC , val ) ;
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_CPR_RST_SET , DSS_MAS , val ) ;
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_CPR_RST_SET , MSS_MAS , val ) ;
2023-01-17 10:27:17 +01:00
}
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_CPR_RST_SET , val ) ;
2023-01-17 10:27:17 +01:00
}
static void ivpu_boot_host_ss_clk_drive ( struct ivpu_device * vdev , bool enable )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_CPR_CLK_SET ) ;
2023-01-17 10:27:17 +01:00
if ( enable ) {
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_CLK_SET , TOP_NOC , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_CLK_SET , DSS_MAS , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_SS_CPR_CLK_SET , MSS_MAS , val ) ;
2023-01-17 10:27:17 +01:00
} else {
2023-07-31 18:12:54 +02:00
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_CPR_CLK_SET , TOP_NOC , val ) ;
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_CPR_CLK_SET , DSS_MAS , val ) ;
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_CPR_CLK_SET , MSS_MAS , val ) ;
2023-01-17 10:27:17 +01:00
}
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_CPR_CLK_SET , val ) ;
2023-01-17 10:27:17 +01:00
}
static int ivpu_boot_noc_qreqn_check ( struct ivpu_device * vdev , u32 exp_val )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_NOC_QREQN ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
if ( ! REG_TEST_FLD_NUM ( VPU_37XX_HOST_SS_NOC_QREQN , TOP_SOCMMIO , exp_val , val ) )
2023-01-17 10:27:17 +01:00
return - EIO ;
return 0 ;
}
static int ivpu_boot_noc_qacceptn_check ( struct ivpu_device * vdev , u32 exp_val )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_NOC_QACCEPTN ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
if ( ! REG_TEST_FLD_NUM ( VPU_37XX_HOST_SS_NOC_QACCEPTN , TOP_SOCMMIO , exp_val , val ) )
2023-01-17 10:27:17 +01:00
return - EIO ;
return 0 ;
}
static int ivpu_boot_noc_qdeny_check ( struct ivpu_device * vdev , u32 exp_val )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_NOC_QDENY ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
if ( ! REG_TEST_FLD_NUM ( VPU_37XX_HOST_SS_NOC_QDENY , TOP_SOCMMIO , exp_val , val ) )
2023-01-17 10:27:17 +01:00
return - EIO ;
return 0 ;
}
static int ivpu_boot_top_noc_qrenqn_check ( struct ivpu_device * vdev , u32 exp_val )
{
2023-09-01 11:49:55 +02:00
u32 val = REGV_RD32 ( VPU_37XX_TOP_NOC_QREQN ) ;
2023-01-17 10:27:17 +01:00
2023-09-01 11:49:55 +02:00
if ( ! REG_TEST_FLD_NUM ( VPU_37XX_TOP_NOC_QREQN , CPU_CTRL , exp_val , val ) | |
! REG_TEST_FLD_NUM ( VPU_37XX_TOP_NOC_QREQN , HOSTIF_L2CACHE , exp_val , val ) )
2023-01-17 10:27:17 +01:00
return - EIO ;
return 0 ;
}
static int ivpu_boot_top_noc_qacceptn_check ( struct ivpu_device * vdev , u32 exp_val )
{
2023-09-01 11:49:55 +02:00
u32 val = REGV_RD32 ( VPU_37XX_TOP_NOC_QACCEPTN ) ;
2023-01-17 10:27:17 +01:00
2023-09-01 11:49:55 +02:00
if ( ! REG_TEST_FLD_NUM ( VPU_37XX_TOP_NOC_QACCEPTN , CPU_CTRL , exp_val , val ) | |
! REG_TEST_FLD_NUM ( VPU_37XX_TOP_NOC_QACCEPTN , HOSTIF_L2CACHE , exp_val , val ) )
2023-01-17 10:27:17 +01:00
return - EIO ;
return 0 ;
}
static int ivpu_boot_top_noc_qdeny_check ( struct ivpu_device * vdev , u32 exp_val )
{
2023-09-01 11:49:55 +02:00
u32 val = REGV_RD32 ( VPU_37XX_TOP_NOC_QDENY ) ;
2023-01-17 10:27:17 +01:00
2023-09-01 11:49:55 +02:00
if ( ! REG_TEST_FLD_NUM ( VPU_37XX_TOP_NOC_QDENY , CPU_CTRL , exp_val , val ) | |
! REG_TEST_FLD_NUM ( VPU_37XX_TOP_NOC_QDENY , HOSTIF_L2CACHE , exp_val , val ) )
2023-01-17 10:27:17 +01:00
return - EIO ;
return 0 ;
}
static int ivpu_boot_host_ss_configure ( struct ivpu_device * vdev )
{
ivpu_boot_host_ss_rst_clr_assert ( vdev ) ;
return ivpu_boot_noc_qreqn_check ( vdev , 0x0 ) ;
}
static void ivpu_boot_vpu_idle_gen_disable ( struct ivpu_device * vdev )
{
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN , 0x0 ) ;
2023-01-17 10:27:17 +01:00
}
static int ivpu_boot_host_ss_axi_drive ( struct ivpu_device * vdev , bool enable )
{
int ret ;
u32 val ;
2023-07-31 18:12:54 +02:00
val = REGV_RD32 ( VPU_37XX_HOST_SS_NOC_QREQN ) ;
2023-01-17 10:27:17 +01:00
if ( enable )
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_NOC_QREQN , TOP_SOCMMIO , val ) ;
2023-01-17 10:27:17 +01:00
else
2023-07-31 18:12:54 +02:00
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_NOC_QREQN , TOP_SOCMMIO , val ) ;
REGV_WR32 ( VPU_37XX_HOST_SS_NOC_QREQN , val ) ;
2023-01-17 10:27:17 +01:00
ret = ivpu_boot_noc_qacceptn_check ( vdev , enable ? 0x1 : 0x0 ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed qacceptn check: %d \n " , ret ) ;
return ret ;
}
ret = ivpu_boot_noc_qdeny_check ( vdev , 0x0 ) ;
if ( ret )
ivpu_err ( vdev , " Failed qdeny check: %d \n " , ret ) ;
return ret ;
}
static int ivpu_boot_host_ss_axi_enable ( struct ivpu_device * vdev )
{
return ivpu_boot_host_ss_axi_drive ( vdev , true ) ;
}
static int ivpu_boot_host_ss_top_noc_drive ( struct ivpu_device * vdev , bool enable )
{
int ret ;
u32 val ;
2023-09-01 11:49:55 +02:00
val = REGV_RD32 ( VPU_37XX_TOP_NOC_QREQN ) ;
2023-01-17 10:27:17 +01:00
if ( enable ) {
2023-09-01 11:49:55 +02:00
val = REG_SET_FLD ( VPU_37XX_TOP_NOC_QREQN , CPU_CTRL , val ) ;
val = REG_SET_FLD ( VPU_37XX_TOP_NOC_QREQN , HOSTIF_L2CACHE , val ) ;
2023-01-17 10:27:17 +01:00
} else {
2023-09-01 11:49:55 +02:00
val = REG_CLR_FLD ( VPU_37XX_TOP_NOC_QREQN , CPU_CTRL , val ) ;
val = REG_CLR_FLD ( VPU_37XX_TOP_NOC_QREQN , HOSTIF_L2CACHE , val ) ;
2023-01-17 10:27:17 +01:00
}
2023-09-01 11:49:55 +02:00
REGV_WR32 ( VPU_37XX_TOP_NOC_QREQN , val ) ;
2023-01-17 10:27:17 +01:00
ret = ivpu_boot_top_noc_qacceptn_check ( vdev , enable ? 0x1 : 0x0 ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed qacceptn check: %d \n " , ret ) ;
return ret ;
}
ret = ivpu_boot_top_noc_qdeny_check ( vdev , 0x0 ) ;
if ( ret )
ivpu_err ( vdev , " Failed qdeny check: %d \n " , ret ) ;
return ret ;
}
static int ivpu_boot_host_ss_top_noc_enable ( struct ivpu_device * vdev )
{
return ivpu_boot_host_ss_top_noc_drive ( vdev , true ) ;
}
static void ivpu_boot_pwr_island_trickle_drive ( struct ivpu_device * vdev , bool enable )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 ) ;
2023-01-17 10:27:17 +01:00
if ( enable )
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 , MSS_CPU , val ) ;
2023-01-17 10:27:17 +01:00
else
2023-07-31 18:12:54 +02:00
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 , MSS_CPU , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 , val ) ;
2023-01-17 10:27:17 +01:00
}
static void ivpu_boot_pwr_island_drive ( struct ivpu_device * vdev , bool enable )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 ) ;
2023-01-17 10:27:17 +01:00
if ( enable )
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 , MSS_CPU , val ) ;
2023-01-17 10:27:17 +01:00
else
2023-07-31 18:12:54 +02:00
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 , MSS_CPU , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 , val ) ;
2023-01-17 10:27:17 +01:00
}
static int ivpu_boot_wait_for_pwr_island_status ( struct ivpu_device * vdev , u32 exp_val )
{
2023-07-31 18:12:54 +02:00
return REGV_POLL_FLD ( VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0 , MSS_CPU ,
2023-01-17 10:27:17 +01:00
exp_val , PWR_ISLAND_STATUS_TIMEOUT_US ) ;
}
static void ivpu_boot_pwr_island_isolation_drive ( struct ivpu_device * vdev , bool enable )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 ) ;
2023-01-17 10:27:17 +01:00
if ( enable )
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 , MSS_CPU , val ) ;
2023-01-17 10:27:17 +01:00
else
2023-07-31 18:12:54 +02:00
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 , MSS_CPU , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 , val ) ;
2023-01-17 10:27:17 +01:00
}
static void ivpu_boot_dpu_active_drive ( struct ivpu_device * vdev , bool enable )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_SS_AON_DPU_ACTIVE ) ;
2023-01-17 10:27:17 +01:00
if ( enable )
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_AON_DPU_ACTIVE , DPU_ACTIVE , val ) ;
2023-01-17 10:27:17 +01:00
else
2023-07-31 18:12:54 +02:00
val = REG_CLR_FLD ( VPU_37XX_HOST_SS_AON_DPU_ACTIVE , DPU_ACTIVE , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_AON_DPU_ACTIVE , val ) ;
2023-01-17 10:27:17 +01:00
}
static int ivpu_boot_pwr_domain_enable ( struct ivpu_device * vdev )
{
int ret ;
ivpu_boot_pwr_island_trickle_drive ( vdev , true ) ;
ivpu_boot_pwr_island_drive ( vdev , true ) ;
ret = ivpu_boot_wait_for_pwr_island_status ( vdev , 0x1 ) ;
if ( ret ) {
ivpu_err ( vdev , " Timed out waiting for power island status \n " ) ;
return ret ;
}
ret = ivpu_boot_top_noc_qrenqn_check ( vdev , 0x0 ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed qrenqn check %d \n " , ret ) ;
return ret ;
}
ivpu_boot_host_ss_clk_drive ( vdev , true ) ;
ivpu_boot_pwr_island_isolation_drive ( vdev , false ) ;
ivpu_boot_host_ss_rst_drive ( vdev , true ) ;
ivpu_boot_dpu_active_drive ( vdev , true ) ;
return ret ;
}
static void ivpu_boot_no_snoop_enable ( struct ivpu_device * vdev )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES , NOSNOOP_OVERRIDE_EN , val ) ;
2024-01-26 13:27:58 +01:00
val = REG_CLR_FLD ( VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES , AW_NOSNOOP_OVERRIDE , val ) ;
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES , AR_NOSNOOP_OVERRIDE , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES , val ) ;
2023-01-17 10:27:17 +01:00
}
static void ivpu_boot_tbu_mmu_enable ( struct ivpu_device * vdev )
{
2023-07-31 18:12:54 +02:00
u32 val = REGV_RD32 ( VPU_37XX_HOST_IF_TBU_MMUSSIDV ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_IF_TBU_MMUSSIDV , TBU0_AWMMUSSIDV , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_IF_TBU_MMUSSIDV , TBU0_ARMMUSSIDV , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_IF_TBU_MMUSSIDV , TBU2_AWMMUSSIDV , val ) ;
val = REG_SET_FLD ( VPU_37XX_HOST_IF_TBU_MMUSSIDV , TBU2_ARMMUSSIDV , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_IF_TBU_MMUSSIDV , val ) ;
2023-01-17 10:27:17 +01:00
}
static void ivpu_boot_soc_cpu_boot ( struct ivpu_device * vdev )
{
u32 val ;
2023-09-01 11:49:55 +02:00
val = REGV_RD32 ( VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC ) ;
val = REG_SET_FLD ( VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC , IRQI_RSTRUN0 , val ) ;
2023-01-17 10:27:17 +01:00
2023-09-01 11:49:55 +02:00
val = REG_CLR_FLD ( VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC , IRQI_RSTVEC , val ) ;
REGV_WR32 ( VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC , val ) ;
2023-01-17 10:27:17 +01:00
2023-09-01 11:49:55 +02:00
val = REG_SET_FLD ( VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC , IRQI_RESUME0 , val ) ;
REGV_WR32 ( VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC , val ) ;
2023-01-17 10:27:17 +01:00
2023-09-01 11:49:55 +02:00
val = REG_CLR_FLD ( VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC , IRQI_RESUME0 , val ) ;
REGV_WR32 ( VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC , val ) ;
2023-01-17 10:27:21 +01:00
val = vdev - > fw - > entry_point > > 9 ;
2023-07-31 18:12:54 +02:00
REGV_WR32 ( VPU_37XX_HOST_SS_LOADING_ADDRESS_LO , val ) ;
2023-01-17 10:27:21 +01:00
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_HOST_SS_LOADING_ADDRESS_LO , DONE , val ) ;
REGV_WR32 ( VPU_37XX_HOST_SS_LOADING_ADDRESS_LO , val ) ;
2023-01-17 10:27:21 +01:00
ivpu_dbg ( vdev , PM , " Booting firmware, mode: %s \n " ,
vdev - > fw - > entry_point = = vdev - > fw - > cold_boot_entry_point ? " cold boot " : " resume " ) ;
2023-01-17 10:27:17 +01:00
}
static int ivpu_boot_d0i3_drive ( struct ivpu_device * vdev , bool enable )
{
int ret ;
u32 val ;
2023-07-31 18:12:54 +02:00
ret = REGB_POLL_FLD ( VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL , INPROGRESS , 0 , TIMEOUT_US ) ;
2023-01-17 10:27:17 +01:00
if ( ret ) {
2023-01-20 09:28:42 +00:00
ivpu_err ( vdev , " Failed to sync before D0i3 transition: %d \n " , ret ) ;
2023-01-17 10:27:17 +01:00
return ret ;
}
2023-07-31 18:12:54 +02:00
val = REGB_RD32 ( VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL ) ;
2023-01-17 10:27:17 +01:00
if ( enable )
2023-07-31 18:12:54 +02:00
val = REG_SET_FLD ( VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL , I3 , val ) ;
2023-01-17 10:27:17 +01:00
else
2023-07-31 18:12:54 +02:00
val = REG_CLR_FLD ( VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL , I3 , val ) ;
REGB_WR32 ( VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL , val ) ;
2023-01-17 10:27:17 +01:00
2023-07-31 18:12:54 +02:00
ret = REGB_POLL_FLD ( VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL , INPROGRESS , 0 , TIMEOUT_US ) ;
2023-01-17 10:27:17 +01:00
if ( ret )
2023-01-20 09:28:42 +00:00
ivpu_err ( vdev , " Failed to sync after D0i3 transition: %d \n " , ret ) ;
2023-01-17 10:27:17 +01:00
return ret ;
}
2023-07-31 18:12:54 +02:00
static int ivpu_hw_37xx_info_init ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
struct ivpu_hw_info * hw = vdev - > hw ;
2023-03-23 13:55:02 +01:00
hw - > tile_fuse = TILE_FUSE_ENABLE_BOTH ;
2024-02-14 09:12:58 +01:00
hw - > sku = TILE_SKU_BOTH ;
2023-03-23 13:55:02 +01:00
hw - > config = WP_CONFIG_2_TILE_4_3_RATIO ;
2023-01-17 10:27:17 +01:00
ivpu_pll_init_frequency_ratios ( vdev ) ;
2023-07-31 18:12:57 +02:00
ivpu_hw_init_range ( & hw - > ranges . global , 0x80000000 , SZ_512M ) ;
ivpu_hw_init_range ( & hw - > ranges . user , 0xc0000000 , 255 * SZ_1M ) ;
ivpu_hw_init_range ( & hw - > ranges . shave , 0x180000000 , SZ_2G ) ;
ivpu_hw_init_range ( & hw - > ranges . dma , 0x200000000 , SZ_8G ) ;
2023-01-17 10:27:17 +01:00
2023-10-20 12:45:01 +02:00
vdev - > platform = IVPU_PLATFORM_SILICON ;
2023-10-20 12:45:00 +02:00
ivpu_hw_wa_init ( vdev ) ;
ivpu_hw_timeouts_init ( vdev ) ;
2023-01-17 10:27:17 +01:00
return 0 ;
}
2024-02-07 11:24:46 +01:00
static int ivpu_hw_37xx_ip_reset ( struct ivpu_device * vdev )
{
int ret ;
u32 val ;
if ( IVPU_WA ( punit_disabled ) )
return 0 ;
ret = REGB_POLL_FLD ( VPU_37XX_BUTTRESS_VPU_IP_RESET , TRIGGER , 0 , TIMEOUT_US ) ;
if ( ret ) {
ivpu_err ( vdev , " Timed out waiting for TRIGGER bit \n " ) ;
return ret ;
}
val = REGB_RD32 ( VPU_37XX_BUTTRESS_VPU_IP_RESET ) ;
val = REG_SET_FLD ( VPU_37XX_BUTTRESS_VPU_IP_RESET , TRIGGER , val ) ;
REGB_WR32 ( VPU_37XX_BUTTRESS_VPU_IP_RESET , val ) ;
ret = REGB_POLL_FLD ( VPU_37XX_BUTTRESS_VPU_IP_RESET , TRIGGER , 0 , TIMEOUT_US ) ;
if ( ret )
ivpu_err ( vdev , " Timed out waiting for RESET completion \n " ) ;
return ret ;
}
2023-07-31 18:12:54 +02:00
static int ivpu_hw_37xx_reset ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
2023-11-15 12:10:04 +01:00
int ret = 0 ;
2023-01-17 10:27:17 +01:00
2024-02-07 11:24:46 +01:00
if ( ivpu_hw_37xx_ip_reset ( vdev ) ) {
ivpu_err ( vdev , " Failed to reset NPU \n " ) ;
2023-11-15 12:10:04 +01:00
ret = - EIO ;
2023-01-17 10:27:17 +01:00
}
2023-11-15 12:10:04 +01:00
if ( ivpu_pll_disable ( vdev ) ) {
ivpu_err ( vdev , " Failed to disable PLL \n " ) ;
ret = - EIO ;
}
2023-01-17 10:27:17 +01:00
return ret ;
}
2023-07-31 18:12:54 +02:00
static int ivpu_hw_37xx_d0i3_enable ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
int ret ;
ret = ivpu_boot_d0i3_drive ( vdev , true ) ;
if ( ret )
ivpu_err ( vdev , " Failed to enable D0i3: %d \n " , ret ) ;
udelay ( 5 ) ; /* VPU requires 5 us to complete the transition */
return ret ;
}
2023-07-31 18:12:54 +02:00
static int ivpu_hw_37xx_d0i3_disable ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
int ret ;
ret = ivpu_boot_d0i3_drive ( vdev , false ) ;
if ( ret )
ivpu_err ( vdev , " Failed to disable D0i3: %d \n " , ret ) ;
return ret ;
}
2023-07-31 18:12:54 +02:00
static int ivpu_hw_37xx_power_up ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
int ret ;
2024-02-07 11:24:46 +01:00
/* PLL requests may fail when powering down, so issue WP 0 here */
ret = ivpu_pll_disable ( vdev ) ;
if ( ret )
ivpu_warn ( vdev , " Failed to disable PLL: %d \n " , ret ) ;
2023-07-31 18:12:54 +02:00
ret = ivpu_hw_37xx_d0i3_disable ( vdev ) ;
2023-01-17 10:27:17 +01:00
if ( ret )
ivpu_warn ( vdev , " Failed to disable D0I3: %d \n " , ret ) ;
ret = ivpu_pll_enable ( vdev ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed to enable PLL: %d \n " , ret ) ;
return ret ;
}
ret = ivpu_boot_host_ss_configure ( vdev ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed to configure host SS: %d \n " , ret ) ;
return ret ;
}
/*
* The control circuitry for vpu_idle indication logic powers up active .
* To ensure unnecessary low power mode signal from LRT during bring up ,
* KMD disables the circuitry prior to bringing up the Main Power island .
*/
ivpu_boot_vpu_idle_gen_disable ( vdev ) ;
ret = ivpu_boot_pwr_domain_enable ( vdev ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed to enable power domain: %d \n " , ret ) ;
return ret ;
}
ret = ivpu_boot_host_ss_axi_enable ( vdev ) ;
if ( ret ) {
ivpu_err ( vdev , " Failed to enable AXI: %d \n " , ret ) ;
return ret ;
}
ret = ivpu_boot_host_ss_top_noc_enable ( vdev ) ;
if ( ret )
ivpu_err ( vdev , " Failed to enable TOP NOC: %d \n " , ret ) ;
return ret ;
}
2023-07-31 18:12:54 +02:00
static int ivpu_hw_37xx_boot_fw ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
ivpu_boot_no_snoop_enable ( vdev ) ;
ivpu_boot_tbu_mmu_enable ( vdev ) ;
ivpu_boot_soc_cpu_boot ( vdev ) ;
return 0 ;
}
2023-07-31 18:12:54 +02:00
static bool ivpu_hw_37xx_is_idle ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
u32 val ;
if ( IVPU_WA ( punit_disabled ) )
return true ;
2023-07-31 18:12:54 +02:00
val = REGB_RD32 ( VPU_37XX_BUTTRESS_VPU_STATUS ) ;
return REG_TEST_FLD ( VPU_37XX_BUTTRESS_VPU_STATUS , READY , val ) & &
REG_TEST_FLD ( VPU_37XX_BUTTRESS_VPU_STATUS , IDLE , val ) ;
2023-01-17 10:27:17 +01:00
}
2023-10-28 15:34:15 +02:00
static int ivpu_hw_37xx_wait_for_idle ( struct ivpu_device * vdev )
{
return REGB_POLL_FLD ( VPU_37XX_BUTTRESS_VPU_STATUS , IDLE , 0x1 , IDLE_TIMEOUT_US ) ;
}
2023-10-28 15:34:12 +02:00
static void ivpu_hw_37xx_save_d0i3_entry_timestamp ( struct ivpu_device * vdev )
{
vdev - > hw - > d0i3_entry_host_ts = ktime_get_boottime ( ) ;
vdev - > hw - > d0i3_entry_vpu_ts = REGV_RD64 ( VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT ) ;
}
2023-07-31 18:12:54 +02:00
static int ivpu_hw_37xx_power_down ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
int ret = 0 ;
2023-10-28 15:34:12 +02:00
ivpu_hw_37xx_save_d0i3_entry_timestamp ( vdev ) ;
2023-11-15 12:10:04 +01:00
if ( ! ivpu_hw_37xx_is_idle ( vdev ) )
2024-02-14 09:13:05 +01:00
ivpu_warn ( vdev , " NPU not idle during power down \n " ) ;
2023-01-17 10:27:17 +01:00
2023-11-15 12:10:04 +01:00
if ( ivpu_hw_37xx_reset ( vdev ) ) {
2024-02-14 09:13:05 +01:00
ivpu_err ( vdev , " Failed to reset NPU \n " ) ;
2023-01-17 10:27:17 +01:00
ret = - EIO ;
}
2023-07-31 18:12:54 +02:00
if ( ivpu_hw_37xx_d0i3_enable ( vdev ) ) {
2023-05-25 12:38:17 +02:00
ivpu_err ( vdev , " Failed to enter D0I3 \n " ) ;
ret = - EIO ;
}
2023-01-17 10:27:17 +01:00
return ret ;
}
2023-07-31 18:12:54 +02:00
static void ivpu_hw_37xx_wdt_disable ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
u32 val ;
/* Enable writing and set non-zero WDT value */
2023-09-01 11:49:55 +02:00
REGV_WR32 ( VPU_37XX_CPU_SS_TIM_SAFE , TIM_SAFE_ENABLE ) ;
REGV_WR32 ( VPU_37XX_CPU_SS_TIM_WATCHDOG , TIM_WATCHDOG_RESET_VALUE ) ;
2023-01-17 10:27:17 +01:00
/* Enable writing and disable watchdog timer */
2023-09-01 11:49:55 +02:00
REGV_WR32 ( VPU_37XX_CPU_SS_TIM_SAFE , TIM_SAFE_ENABLE ) ;
REGV_WR32 ( VPU_37XX_CPU_SS_TIM_WDOG_EN , 0 ) ;
2023-01-17 10:27:17 +01:00
/* Now clear the timeout interrupt */
2023-09-01 11:49:55 +02:00
val = REGV_RD32 ( VPU_37XX_CPU_SS_TIM_GEN_CONFIG ) ;
val = REG_CLR_FLD ( VPU_37XX_CPU_SS_TIM_GEN_CONFIG , WDOG_TO_INT_CLR , val ) ;
REGV_WR32 ( VPU_37XX_CPU_SS_TIM_GEN_CONFIG , val ) ;
2023-01-17 10:27:17 +01:00
}
2023-10-28 17:59:29 +02:00
static u32 ivpu_hw_37xx_profiling_freq_get ( struct ivpu_device * vdev )
{
return PLL_PROF_CLK_FREQ ;
}
static void ivpu_hw_37xx_profiling_freq_drive ( struct ivpu_device * vdev , bool enable )
{
/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}
2024-04-02 12:49:27 +02:00
/*
 * Convert a PLL ratio to the CPU clock frequency in Hz.
 * The CPU clock is derived from the PLL clock (50 MHz reference * ratio):
 * 2/4 of the PLL clock in the 4/3 workpoint configuration, 2/5 otherwise.
 */
static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
	const u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
	const bool ratio_4_3 = (vdev->hw->config & 0xff) == PLL_RATIO_4_3;

	return ratio_4_3 ? pll_clock * 2 / 4 : pll_clock * 2 / 5;
}
2023-01-17 10:27:17 +01:00
/* Register indirect accesses */
2023-07-31 18:12:54 +02:00
static u32 ivpu_hw_37xx_reg_pll_freq_get ( struct ivpu_device * vdev )
2023-01-17 10:27:17 +01:00
{
u32 pll_curr_ratio ;
2023-07-31 18:12:54 +02:00
pll_curr_ratio = REGB_RD32 ( VPU_37XX_BUTTRESS_CURRENT_PLL ) ;
pll_curr_ratio & = VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK ;
2023-01-17 10:27:17 +01:00
if ( ! ivpu_is_silicon ( vdev ) )
return PLL_SIMULATION_FREQ ;
2024-04-02 12:49:27 +02:00
return ivpu_hw_37xx_ratio_to_freq ( vdev , pll_curr_ratio ) ;
2023-01-17 10:27:17 +01:00
}
2023-07-31 18:12:54 +02:00
/* Read the telemetry buffer offset from the buttress register. */
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}
2023-07-31 18:12:54 +02:00
/* Read the telemetry buffer size from the buttress register. */
static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}
2023-07-31 18:12:54 +02:00
/* Read the telemetry enable flags from the buttress register. */
static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
2023-07-31 18:12:54 +02:00
/*
 * Ring doorbell @db_id by writing the SET bit to the corresponding
 * DOORBELL register, addressed as DOORBELL_0 + db_id * register stride.
 */
static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
	u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
2023-07-31 18:12:54 +02:00
/* Pop the next received IPC message address from the IPC FIFO (ATM read). */
static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
	return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
2023-07-31 18:12:54 +02:00
/*
 * Return the number of pending messages in the IPC RX FIFO.
 * Uses the silent read variant to avoid polluting the register trace.
 */
static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
	u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);

	return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
2023-07-31 18:12:54 +02:00
/* Push an IPC message address (@vpu_addr) into the VPU's IPC TX FIFO. */
static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
2023-07-31 18:12:54 +02:00
/* Clear all ICB interrupt sources covered by the combined IRQ mask. */
static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
	REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}
2023-07-31 18:12:54 +02:00
/*
 * Enable all driver-handled interrupt sources: firewall violations, the
 * ICB sources, and the buttress local interrupts; the global interrupt
 * mask is released last (0 = unmasked). Mirror of ivpu_hw_37xx_irq_disable().
 */
static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
2023-07-31 18:12:54 +02:00
/*
 * Disable all interrupt sources in the reverse order of
 * ivpu_hw_37xx_irq_enable(): mask globally first (1 = masked), then mask
 * the buttress local interrupts, the ICB sources and firewall interrupts.
 */
static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}
2023-07-31 18:12:54 +02:00
/* NCE watchdog timeout: trigger device recovery. */
static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
2023-07-31 18:12:54 +02:00
/*
 * MSS watchdog timeout: silence the watchdog first so it does not keep
 * firing, then trigger device recovery.
 */
static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
2023-07-31 18:12:54 +02:00
/* NOC firewall violation: trigger device recovery. */
static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
/* Handler for IRQs from VPU core (irqV) */
2023-11-13 18:02:52 +01:00
/*
 * Handler for IRQs from the VPU core (irqV). Reads the ICB status, clears
 * the pending bits up front, then dispatches each source. Sets *wake_thread
 * via the IPC handler when threaded IRQ processing is needed.
 *
 * Return: true if any masked interrupt source was pending, false otherwise.
 */
static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
{
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;

	if (!status)
		return false;

	/* Clear all pending sources before dispatch so new events re-latch */
	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev, wake_thread);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		ivpu_hw_37xx_irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		ivpu_hw_37xx_irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		ivpu_hw_37xx_irq_noc_firewall_handler(vdev);

	return true;
}
/* Handler for IRQs from Buttress core (irqB) */
2023-11-13 18:02:52 +01:00
/*
 * Handler for IRQs from the Buttress core (irqB). Logs frequency changes,
 * handles ATS/UFI errors (which schedule device recovery), then clears the
 * interrupt status - the clear sequence depends on a hardware workaround.
 *
 * Return: true if any masked interrupt source was pending, false otherwise.
 */
static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
			 REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
		REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
		REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	if (IVPU_WA(interrupt_clear_with_0))
		/*
		 * Writing 1 triggers an interrupt, so we can't perform read update write.
		 * Clear local interrupt status by writing 0 to all bits.
		 */
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
	else
		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}
2023-07-31 18:12:54 +02:00
static irqreturn_t ivpu_hw_37xx_irq_handler ( int irq , void * ptr )
2023-01-17 10:27:17 +01:00
{
struct ivpu_device * vdev = ptr ;
2023-11-13 18:02:52 +01:00
bool irqv_handled , irqb_handled , wake_thread = false ;
2023-01-17 10:27:17 +01:00
2023-10-24 18:19:52 +02:00
REGB_WR32 ( VPU_37XX_BUTTRESS_GLOBAL_INT_MASK , 0x1 ) ;
2023-11-13 18:02:52 +01:00
irqv_handled = ivpu_hw_37xx_irqv_handler ( vdev , irq , & wake_thread ) ;
irqb_handled = ivpu_hw_37xx_irqb_handler ( vdev , irq ) ;
2023-01-17 10:27:17 +01:00
2023-10-24 18:19:52 +02:00
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
REGB_WR32 ( VPU_37XX_BUTTRESS_GLOBAL_INT_MASK , 0x0 ) ;
2023-11-13 18:02:52 +01:00
if ( wake_thread )
return IRQ_WAKE_THREAD ;
if ( irqv_handled | | irqb_handled )
return IRQ_HANDLED ;
return IRQ_NONE ;
2023-01-17 10:27:17 +01:00
}
2023-07-31 18:12:54 +02:00
/*
 * Log probable causes of a device failure based on the currently latched
 * (unhandled) interrupt status bits and IPC FIFO state. Diagnostic only -
 * does not clear any interrupts or alter device state.
 */
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
	u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
	u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;

	if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
		ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
		ivpu_err(vdev, "WDT MSS timeout detected\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
		ivpu_err(vdev, "WDT NCE timeout detected\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
		ivpu_err(vdev, "NOC Firewall irq detected\n");

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));

	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
	}
}
2023-07-31 18:12:54 +02:00
/* Hardware ops table for the VPU 37XX generation. */
const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
	.info_init = ivpu_hw_37xx_info_init,
	.power_up = ivpu_hw_37xx_power_up,
	.is_idle = ivpu_hw_37xx_is_idle,
	.wait_for_idle = ivpu_hw_37xx_wait_for_idle,
	.power_down = ivpu_hw_37xx_power_down,
	.reset = ivpu_hw_37xx_reset,
	.boot_fw = ivpu_hw_37xx_boot_fw,
	.wdt_disable = ivpu_hw_37xx_wdt_disable,
	.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
	.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
	.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
	.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
	.ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
	.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
	.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
	.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
	.reg_db_set = ivpu_hw_37xx_reg_db_set,
	.reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
	.reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
	.reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
	.irq_clear = ivpu_hw_37xx_irq_clear,
	.irq_enable = ivpu_hw_37xx_irq_enable,
	.irq_disable = ivpu_hw_37xx_irq_disable,
	.irq_handler = ivpu_hw_37xx_irq_handler,
};