/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
# include "amdgpu_smu.h"
# include "atom.h"
# include "amd_pcie.h"
# include "gc/gc_10_1_0_offset.h"
# include "gc/gc_10_1_0_sh_mask.h"
# include "hdp/hdp_5_0_0_offset.h"
# include "hdp/hdp_5_0_0_sh_mask.h"
# include "soc15.h"
# include "soc15_common.h"
# include "gmc_v10_0.h"
# include "gfxhub_v2_0.h"
# include "mmhub_v2_0.h"
# include "nv.h"
# include "navi10_ih.h"
# include "gfx_v10_0.h"
# include "sdma_v5_0.h"
# include "vcn_v2_0.h"
# include "dce_virtual.h"
# include "mes_v10_1.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
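
/*
 * DIDT indirect register accessors: accesses go through the
 * DIDT_IND_INDEX/DIDT_IND_DATA pair and are serialized by didt_idx_lock.
 */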
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
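
/*
 * nv_grbm_select - program GRBM_GFX_CNTL to select which ME, pipe, queue
 * and VMID subsequent GRBM-indexed register accesses apply to.
 */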
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	/* TODO: will implement it when SMU header is available */
	return false;
}
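
/*
 * Whitelist of registers that may be read back through nv_read_register();
 * anything not listed here is rejected with -EINVAL.
 */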
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}
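
/*
 * nv_read_register - look reg_offset up in the whitelist above and, if it is
 * allowed, return its value (routed through the SE/SH index when the entry
 * is marked grbm_indexed); otherwise return -EINVAL.
 */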
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
}
#endif
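
/*
 * Mode1 reset through the PSP: disable bus mastering, save PCI config space,
 * ask the PSP firmware to reset the ASIC, then restore config space and poll
 * the memsize register until the ASIC comes back out of reset.
 */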
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
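
/*
 * ASIC reset entry point: prefer a BACO reset through the SW SMU when the
 * SMU reports BACO support, otherwise fall back to a PSP mode1 reset.
 */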
static int nv_asic_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	if (smu_baco_is_support(smu))
		ret = smu_baco_reset(smu);
	else
		ret = nv_asic_mode1_reset(adev);

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
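
/*
 * nv_set_ip_blocks - set up the per-ASIC register base offsets, pick the
 * NBIO callbacks, and register the IP blocks for the detected ASIC in the
 * order they should be initialized.
 */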
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	adev->nbio_funcs = &nbio_v2_3_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/*TODO*/
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}
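
/*
 * Assign the Navi10-family doorbell index layout: KIQ, MEC rings, user
 * queues, GFX rings, SDMA engines, IH, VCN rings and the non-CP range.
 */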
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
};
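
/*
 * Common early init: hook up the register accessors and ASIC callbacks,
 * read back the revision id, and set the per-ASIC clockgating/powergating
 * feature flags and external revision id.
 */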
static int nv_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_MMHUB |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN;
		adev->external_rev_id = adev->rev_id + 0x1; /* ??? */
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	return 0;
}

static int nv_common_sw_init(void *handle)
{
	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
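
/*
 * HDP memory power gating: force the IPH and RC memory clocks on, clear all
 * HDP memory power features, then enable exactly one of LS/DS/SD according
 * to cg_flags (RC falls back to DS when SD is selected), and finally restore
 * the original clock overrides.
 */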
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing clock/power mode switch,
	 * forced on IPH & RC clock */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before any changing */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fallback to ds */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
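
/*
 * HDP medium grain clock gating: clear the clock soft-override bits when
 * gating is being enabled, set them to force the clocks on when it is being
 * disabled.
 */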
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}
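
/*
 * Report which clockgating features are currently active by decoding
 * HDP_CLK_CNTL and HDP_MEM_POWER_CTRL, in addition to the flags reported by
 * the NBIO block.
 */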
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};