/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}
static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}
static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}
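
/* PCIe port (SMN) accessors. These go through the raw nbio port index/data
 * pair under pcie_idx_lock; the register argument is a dword offset that is
 * converted to bytes (reg * 4) when programming the index, and the dummy
 * RREG32() readbacks presumably flush the posted index write before the data
 * register is touched.
 */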
static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}
static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
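
/* Route subsequent GFX register accesses to a specific me/pipe/queue/vmid by
 * programming GRBM_GFX_CNTL. No locking is done here; callers are expected
 * to serialize selections themselves (typically via a driver mutex such as
 * srbm_mutex).
 */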
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
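
/* Read the VBIOS image of a discrete GPU through the SMUIO ROM_INDEX /
 * ROM_DATA register pair, one dword at a time. APUs return false here
 * because their vbios image is part of the system BIOS image.
 */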
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
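
/* Whitelist of registers that may be read back through nv_read_register()
 * below; any offset not in this table is rejected with -EINVAL.
 */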
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2) },
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0) },
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1) },
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2) },
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3) },
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG) },
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG) },
};
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if ((i == 7 && (adev->sdma.num_instances == 1)) || /* some asics don't have SDMA1 */
		    reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
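
/* Full-chip mode1 reset: disable bus mastering, cache the PCI config space,
 * then trigger the reset through the SMU when it advertises mode1 support,
 * or through the PSP otherwise. After restoring the PCI state, poll the
 * nbio memsize register until it no longer reads 0xffffffff, which is used
 * here as the sign that the ASIC has come back out of reset.
 */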
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (smu_baco_is_support(smu))
		return true;
	else
		return false;
}
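
/* Select the reset method: an explicit amdgpu_reset_method module parameter
 * (MODE1 or BACO) is honored as-is; otherwise Sienna Cichlid, Navy Flounder
 * and Dimgrey Cavefish default to MODE1, and the remaining ASICs use BACO
 * whenever the SMU reports BACO support.
 */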
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		return AMD_RESET_METHOD_MODE1;
	default:
		if (smu_baco_is_support(smu))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "BACO reset\n");

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		dev_info(adev->dev, "MODE1 reset\n");
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
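
/* Initialize the per-IP register base offsets, preferring the IP discovery
 * table when amdgpu_discovery is enabled and falling back to the legacy
 * per-ASIC tables otherwise.
 */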
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
					"fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	case CHIP_VANGOGH:
		vangogh_reg_base_init(adev);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dimgrey_cavefish_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}
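
/* These device/revision ID pairs identify headless SKUs; the only effect of
 * this check is that the VCN 2.0 IP block is not added for them in
 * nv_set_ip_blocks() below.
 */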
static bool nv_is_headless_sku(struct pci_dev *pdev)
{
	if ((pdev->device == 0x731E &&
	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
	    (pdev->device == 0x7340 && pdev->revision == 0xC9))
		return true;
	return false;
}
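
/* Build the IP block list for the detected ASIC. Blocks are initialized in
 * the order they are added here: common, GMC and IH first, then PSP/SMU
 * (depending on firmware load type and virtualization mode), display, GFX,
 * SDMA and finally the multimedia and MES blocks.
 */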
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
	}

	if (adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->gmc.xgmi.supported = true;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (!nv_is_headless_sku(adev->pdev))
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case CHIP_VANGOGH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO
	 * dummy implementation for the pcie_replay_count sysfs interface
	 */
	return 0;
}
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}
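
/* Used for the UMD "stable pstate" (profiling) transitions: enter RLC safe
 * mode and disable perfmon clock gating on the way in, and undo both on the
 * way out. The ASPM toggle stays compiled out (#if 0) until ASPM is
 * validated on Navi.
 */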
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	/*
	 * The ASPM function is not fully enabled and verified on
	 * Navi yet. Temporarily skip this until ASPM is enabled.
	 */
#if 0
	if (adev->nbio.funcs->enable_aspm)
		adev->nbio.funcs->enable_aspm(adev, !enter);
#endif

	return 0;
}
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
};
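
/* Early init: hook up the indirect register accessors defined above, set up
 * the rmmio_remap window near the top of the register aperture
 * (MMIO_REG_HOLE_OFFSET), which nv_common_hw_init() later uses for HDP
 * register remapping, and fill in the per-ASIC clock/power gating flags and
 * external revision id.
 */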
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * Work around this by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* the hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * so that they can be exposed to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}
static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
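
/* HDP SRAM power gating. HDP 5.0 cannot switch power modes dynamically, so
 * the IPH/RC memory clocks are forced on and all gating is disabled first,
 * then exactly one of LS, DS or SD is (re)enabled according to cg_flags,
 * and finally the clock overrides are restored.
 */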
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH & RC clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	/* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
	 * be set for SRAM LS/DS/SD */
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
			      AMD_CG_SUPPORT_HDP_SD)) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_CTRL_EN, 1);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_CTRL_EN, 1);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
					       state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
					   state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};