2015-04-20 17:31:14 -04:00
/*
* Copyright 2014 Advanced Micro Devices , Inc .
*
* Permission is hereby granted , free of charge , to any person obtaining a
* copy of this software and associated documentation files ( the " Software " ) ,
* to deal in the Software without restriction , including without limitation
* the rights to use , copy , modify , merge , publish , distribute , sublicense ,
* and / or sell copies of the Software , and to permit persons to whom the
* Software is furnished to do so , subject to the following conditions :
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
* IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL
* THE COPYRIGHT HOLDER ( S ) OR AUTHOR ( S ) BE LIABLE FOR ANY CLAIM , DAMAGES OR
* OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE ,
* ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE .
*
*/
# include <linux/slab.h>
2017-04-24 13:50:21 +09:00
# include <drm/drmP.h>
2015-04-20 17:31:14 -04:00
# include "amdgpu.h"
# include "amdgpu_atombios.h"
# include "amdgpu_ih.h"
# include "amdgpu_uvd.h"
# include "amdgpu_vce.h"
# include "amdgpu_ucode.h"
# include "atom.h"
2015-11-11 19:45:06 -05:00
# include "amd_pcie.h"
2015-04-20 17:31:14 -04:00
# include "gmc/gmc_8_1_d.h"
# include "gmc/gmc_8_1_sh_mask.h"
# include "oss/oss_3_0_d.h"
# include "oss/oss_3_0_sh_mask.h"
# include "bif/bif_5_0_d.h"
# include "bif/bif_5_0_sh_mask.h"
# include "gca/gfx_8_0_d.h"
# include "gca/gfx_8_0_sh_mask.h"
# include "smu/smu_7_1_1_d.h"
# include "smu/smu_7_1_1_sh_mask.h"
# include "uvd/uvd_5_0_d.h"
# include "uvd/uvd_5_0_sh_mask.h"
# include "vce/vce_3_0_d.h"
# include "vce/vce_3_0_sh_mask.h"
# include "dce/dce_10_0_d.h"
# include "dce/dce_10_0_sh_mask.h"
# include "vid.h"
# include "vi.h"
# include "vi_dpm.h"
# include "gmc_v8_0.h"
2016-02-03 19:16:54 +08:00
# include "gmc_v7_0.h"
2015-04-20 17:31:14 -04:00
# include "gfx_v8_0.h"
# include "sdma_v2_4.h"
# include "sdma_v3_0.h"
# include "dce_v10_0.h"
# include "dce_v11_0.h"
# include "iceland_ih.h"
# include "tonga_ih.h"
# include "cz_ih.h"
# include "uvd_v5_0.h"
# include "uvd_v6_0.h"
# include "vce_v3_0.h"
2015-09-22 17:05:20 -04:00
# if defined(CONFIG_DRM_AMD_ACP)
# include "amdgpu_acp.h"
# endif
2016-08-08 11:36:45 +08:00
# include "dce_virtual.h"
2017-01-12 15:22:18 +08:00
# include "mxgpu_vi.h"
2017-09-12 15:58:20 -04:00
# include "amdgpu_dm.h"
2015-04-20 17:31:14 -04:00
/*
* Indirect registers accessor
*/
/* Read a PCIE indirect register through the mmPCIE_INDEX/DATA pair. */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);	/* readback before touching the data port */
	val = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return val;
}
/* Write a PCIE indirect register through the mmPCIE_INDEX/DATA pair. */
static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);	/* readback after programming the index */
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);	/* readback after writing the data */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
/* Read an SMC indirect register (dGPU path, SMC_IND_INDEX_11/DATA_11 pair). */
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	val = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return val;
}
/* Write an SMC indirect register (dGPU path, SMC_IND_INDEX_11/DATA_11 pair). */
static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
2015-07-10 16:21:10 -04:00
/* MP0PUB index/data pair (from smu_8_0_d.h) used for SMC access on APUs. */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

/* Read an SMC indirect register on APUs (Carrizo/Stoney path). */
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	val = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return val;
}
/* Write an SMC indirect register on APUs (Carrizo/Stoney path). */
static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
2015-04-20 17:31:14 -04:00
/* Read a UVD context register; the index port only decodes 9 bits. */
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	val = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return val;
}
/* Write a UVD context register; the index port only decodes 9 bits. */
static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
/* Read a DIDT indirect register. */
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	val = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return val;
}
/* Write a DIDT indirect register. */
static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
2016-06-08 12:47:41 +08:00
/* Read a GC_CAC indirect register. */
static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	val = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return val;
}
/* Write a GC_CAC indirect register. */
static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
2015-04-20 17:31:14 -04:00
static const u32 tonga_mgcg_cgcg_init [ ] =
{
mmCGTT_DRM_CLK_CTRL0 , 0xffffffff , 0x00600100 ,
mmPCIE_INDEX , 0xffffffff , 0x0140001c ,
mmPCIE_DATA , 0x000f0000 , 0x00000000 ,
mmSMC_IND_INDEX_4 , 0xffffffff , 0xC060000C ,
mmSMC_IND_DATA_4 , 0xc0000fff , 0x00000100 ,
mmCGTT_DRM_CLK_CTRL0 , 0xff000fff , 0x00000100 ,
mmHDP_XDP_CGTT_BLK_CTRL , 0xc0000fff , 0x00000104 ,
} ;
2015-07-08 01:05:16 +08:00
static const u32 fiji_mgcg_cgcg_init [ ] =
{
mmCGTT_DRM_CLK_CTRL0 , 0xffffffff , 0x00600100 ,
mmPCIE_INDEX , 0xffffffff , 0x0140001c ,
mmPCIE_DATA , 0x000f0000 , 0x00000000 ,
mmSMC_IND_INDEX_4 , 0xffffffff , 0xC060000C ,
mmSMC_IND_DATA_4 , 0xc0000fff , 0x00000100 ,
mmCGTT_DRM_CLK_CTRL0 , 0xff000fff , 0x00000100 ,
mmHDP_XDP_CGTT_BLK_CTRL , 0xc0000fff , 0x00000104 ,
} ;
2015-04-20 17:31:14 -04:00
static const u32 iceland_mgcg_cgcg_init [ ] =
{
mmPCIE_INDEX , 0xffffffff , ixPCIE_CNTL2 ,
mmPCIE_DATA , 0x000f0000 , 0x00000000 ,
mmSMC_IND_INDEX_4 , 0xffffffff , ixCGTT_ROM_CLK_CTRL0 ,
mmSMC_IND_DATA_4 , 0xc0000fff , 0x00000100 ,
mmHDP_XDP_CGTT_BLK_CTRL , 0xc0000fff , 0x00000104 ,
} ;
static const u32 cz_mgcg_cgcg_init [ ] =
{
mmCGTT_DRM_CLK_CTRL0 , 0xffffffff , 0x00600100 ,
mmPCIE_INDEX , 0xffffffff , 0x0140001c ,
mmPCIE_DATA , 0x000f0000 , 0x00000000 ,
mmCGTT_DRM_CLK_CTRL0 , 0xff000fff , 0x00000100 ,
mmHDP_XDP_CGTT_BLK_CTRL , 0xc0000fff , 0x00000104 ,
} ;
2015-10-08 16:31:43 -04:00
static const u32 stoney_mgcg_cgcg_init [ ] =
{
mmCGTT_DRM_CLK_CTRL0 , 0xffffffff , 0x00000100 ,
mmHDP_XDP_CGTT_BLK_CTRL , 0xffffffff , 0x00000104 ,
mmHDP_HOST_PATH_CNTL , 0xffffffff , 0x0f000027 ,
} ;
2015-04-20 17:31:14 -04:00
static void vi_init_golden_registers ( struct amdgpu_device * adev )
{
/* Some of the registers might be dependent on GRBM_GFX_INDEX */
mutex_lock ( & adev - > grbm_idx_mutex ) ;
2017-01-12 15:22:18 +08:00
if ( amdgpu_sriov_vf ( adev ) ) {
xgpu_vi_init_golden_registers ( adev ) ;
mutex_unlock ( & adev - > grbm_idx_mutex ) ;
return ;
}
2015-04-20 17:31:14 -04:00
switch ( adev - > asic_type ) {
case CHIP_TOPAZ :
2017-12-14 16:20:19 -05:00
amdgpu_device_program_register_sequence ( adev ,
iceland_mgcg_cgcg_init ,
ARRAY_SIZE ( iceland_mgcg_cgcg_init ) ) ;
2015-04-20 17:31:14 -04:00
break ;
2015-07-08 01:05:16 +08:00
case CHIP_FIJI :
2017-12-14 16:20:19 -05:00
amdgpu_device_program_register_sequence ( adev ,
fiji_mgcg_cgcg_init ,
ARRAY_SIZE ( fiji_mgcg_cgcg_init ) ) ;
2015-07-08 01:05:16 +08:00
break ;
2015-04-20 17:31:14 -04:00
case CHIP_TONGA :
2017-12-14 16:20:19 -05:00
amdgpu_device_program_register_sequence ( adev ,
tonga_mgcg_cgcg_init ,
ARRAY_SIZE ( tonga_mgcg_cgcg_init ) ) ;
2015-04-20 17:31:14 -04:00
break ;
case CHIP_CARRIZO :
2017-12-14 16:20:19 -05:00
amdgpu_device_program_register_sequence ( adev ,
cz_mgcg_cgcg_init ,
ARRAY_SIZE ( cz_mgcg_cgcg_init ) ) ;
2015-04-20 17:31:14 -04:00
break ;
2015-10-08 16:31:43 -04:00
case CHIP_STONEY :
2017-12-14 16:20:19 -05:00
amdgpu_device_program_register_sequence ( adev ,
stoney_mgcg_cgcg_init ,
ARRAY_SIZE ( stoney_mgcg_cgcg_init ) ) ;
2015-10-08 16:31:43 -04:00
break ;
2016-03-14 18:33:29 -04:00
case CHIP_POLARIS10 :
2018-04-11 15:28:28 -05:00
case CHIP_POLARIS11 :
2016-12-14 15:32:28 -05:00
case CHIP_POLARIS12 :
2018-04-11 15:28:28 -05:00
case CHIP_VEGAM :
2015-04-20 17:31:14 -04:00
default :
break ;
}
mutex_unlock ( & adev - > grbm_idx_mutex ) ;
}
/**
* vi_get_xclk - get the xclk
*
* @ adev : amdgpu_device pointer
*
* Returns the reference clock used by the gfx engine
* ( VI ) .
*/
static u32 vi_get_xclk ( struct amdgpu_device * adev )
{
u32 reference_clock = adev - > clock . spll . reference_freq ;
u32 tmp ;
2015-07-22 11:29:01 +08:00
if ( adev - > flags & AMD_IS_APU )
2015-04-20 17:31:14 -04:00
return reference_clock ;
tmp = RREG32_SMC ( ixCG_CLKPIN_CNTL_2 ) ;
if ( REG_GET_FIELD ( tmp , CG_CLKPIN_CNTL_2 , MUX_TCLK_TO_XCLK ) )
return 1000 ;
tmp = RREG32_SMC ( ixCG_CLKPIN_CNTL ) ;
if ( REG_GET_FIELD ( tmp , CG_CLKPIN_CNTL , XTALIN_DIVIDE ) )
return reference_clock / 4 ;
return reference_clock ;
}
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active registers instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
/* Enable/disable VGA access; intentionally unimplemented on VI. */
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
static bool vi_read_disabled_bios ( struct amdgpu_device * adev )
{
u32 bus_cntl ;
u32 d1vga_control = 0 ;
u32 d2vga_control = 0 ;
u32 vga_render_control = 0 ;
u32 rom_cntl ;
bool r ;
bus_cntl = RREG32 ( mmBUS_CNTL ) ;
if ( adev - > mode_info . num_crtc ) {
d1vga_control = RREG32 ( mmD1VGA_CONTROL ) ;
d2vga_control = RREG32 ( mmD2VGA_CONTROL ) ;
vga_render_control = RREG32 ( mmVGA_RENDER_CONTROL ) ;
}
rom_cntl = RREG32_SMC ( ixROM_CNTL ) ;
/* enable the rom */
WREG32 ( mmBUS_CNTL , ( bus_cntl & ~ BUS_CNTL__BIOS_ROM_DIS_MASK ) ) ;
if ( adev - > mode_info . num_crtc ) {
/* Disable VGA mode */
WREG32 ( mmD1VGA_CONTROL ,
( d1vga_control & ~ ( D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK ) ) ) ;
WREG32 ( mmD2VGA_CONTROL ,
( d2vga_control & ~ ( D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK ) ) ) ;
WREG32 ( mmVGA_RENDER_CONTROL ,
( vga_render_control & ~ VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK ) ) ;
}
WREG32_SMC ( ixROM_CNTL , rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK ) ;
r = amdgpu_read_bios ( adev ) ;
/* restore regs */
WREG32 ( mmBUS_CNTL , bus_cntl ) ;
if ( adev - > mode_info . num_crtc ) {
WREG32 ( mmD1VGA_CONTROL , d1vga_control ) ;
WREG32 ( mmD2VGA_CONTROL , d2vga_control ) ;
WREG32 ( mmVGA_RENDER_CONTROL , vga_render_control ) ;
}
WREG32_SMC ( ixROM_CNTL , rom_cntl ) ;
return r ;
}
2015-11-24 10:37:54 -05:00
/*
 * Read the vbios image out of the ROM via the SMC indirect interface.
 * Returns false for APUs (their vbios lives inside the system bios image).
 *
 * NOTE(review): length_bytes is rounded up to a dword count before the copy,
 * so the destination buffer is assumed to be at least ALIGN(length_bytes, 4)
 * bytes — callers appear to pass dword-aligned sizes; confirm before reuse.
 */
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
2016-03-31 13:26:59 +08:00
static void vi_detect_hw_virtualization ( struct amdgpu_device * adev )
2016-06-11 02:51:32 -04:00
{
2017-12-19 09:52:31 -05:00
uint32_t reg = 0 ;
if ( adev - > asic_type = = CHIP_TONGA | |
adev - > asic_type = = CHIP_FIJI ) {
reg = RREG32 ( mmBIF_IOV_FUNC_IDENTIFIER ) ;
/* bit0: 0 means pf and 1 means vf */
2017-12-19 09:57:53 -05:00
if ( REG_GET_FIELD ( reg , BIF_IOV_FUNC_IDENTIFIER , FUNC_IDENTIFIER ) )
2017-12-19 09:52:31 -05:00
adev - > virt . caps | = AMDGPU_SRIOV_CAPS_IS_VF ;
2017-12-19 09:57:53 -05:00
/* bit31: 0 means disable IOV and 1 means enable */
if ( REG_GET_FIELD ( reg , BIF_IOV_FUNC_IDENTIFIER , IOV_ENABLE ) )
2017-12-19 09:52:31 -05:00
adev - > virt . caps | = AMDGPU_SRIOV_CAPS_ENABLE_IOV ;
}
2016-03-31 13:26:59 +08:00
if ( reg = = 0 ) {
if ( is_virtual_machine ( ) ) /* passthrough mode exclus sr-iov mode */
2017-01-09 18:06:57 -05:00
adev - > virt . caps | = AMDGPU_PASSTHROUGH_MODE ;
2016-03-31 13:26:59 +08:00
}
2016-06-11 02:51:32 -04:00
}
2016-03-19 16:12:17 +01:00
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers [ ] = {
2017-04-12 12:49:54 +02:00
{ mmGRBM_STATUS } ,
{ mmGRBM_STATUS2 } ,
{ mmGRBM_STATUS_SE0 } ,
{ mmGRBM_STATUS_SE1 } ,
{ mmGRBM_STATUS_SE2 } ,
{ mmGRBM_STATUS_SE3 } ,
{ mmSRBM_STATUS } ,
{ mmSRBM_STATUS2 } ,
{ mmSRBM_STATUS3 } ,
{ mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET } ,
{ mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET } ,
{ mmCP_STAT } ,
{ mmCP_STALLED_STAT1 } ,
{ mmCP_STALLED_STAT2 } ,
{ mmCP_STALLED_STAT3 } ,
{ mmCP_CPF_BUSY_STAT } ,
{ mmCP_CPF_STALLED_STAT1 } ,
{ mmCP_CPF_STATUS } ,
{ mmCP_CPC_BUSY_STAT } ,
{ mmCP_CPC_STALLED_STAT1 } ,
{ mmCP_CPC_STATUS } ,
{ mmGB_ADDR_CONFIG } ,
{ mmMC_ARB_RAMCFG } ,
{ mmGB_TILE_MODE0 } ,
{ mmGB_TILE_MODE1 } ,
{ mmGB_TILE_MODE2 } ,
{ mmGB_TILE_MODE3 } ,
{ mmGB_TILE_MODE4 } ,
{ mmGB_TILE_MODE5 } ,
{ mmGB_TILE_MODE6 } ,
{ mmGB_TILE_MODE7 } ,
{ mmGB_TILE_MODE8 } ,
{ mmGB_TILE_MODE9 } ,
{ mmGB_TILE_MODE10 } ,
{ mmGB_TILE_MODE11 } ,
{ mmGB_TILE_MODE12 } ,
{ mmGB_TILE_MODE13 } ,
{ mmGB_TILE_MODE14 } ,
{ mmGB_TILE_MODE15 } ,
{ mmGB_TILE_MODE16 } ,
{ mmGB_TILE_MODE17 } ,
{ mmGB_TILE_MODE18 } ,
{ mmGB_TILE_MODE19 } ,
{ mmGB_TILE_MODE20 } ,
{ mmGB_TILE_MODE21 } ,
{ mmGB_TILE_MODE22 } ,
{ mmGB_TILE_MODE23 } ,
{ mmGB_TILE_MODE24 } ,
{ mmGB_TILE_MODE25 } ,
{ mmGB_TILE_MODE26 } ,
{ mmGB_TILE_MODE27 } ,
{ mmGB_TILE_MODE28 } ,
{ mmGB_TILE_MODE29 } ,
{ mmGB_TILE_MODE30 } ,
{ mmGB_TILE_MODE31 } ,
{ mmGB_MACROTILE_MODE0 } ,
{ mmGB_MACROTILE_MODE1 } ,
{ mmGB_MACROTILE_MODE2 } ,
{ mmGB_MACROTILE_MODE3 } ,
{ mmGB_MACROTILE_MODE4 } ,
{ mmGB_MACROTILE_MODE5 } ,
{ mmGB_MACROTILE_MODE6 } ,
{ mmGB_MACROTILE_MODE7 } ,
{ mmGB_MACROTILE_MODE8 } ,
{ mmGB_MACROTILE_MODE9 } ,
{ mmGB_MACROTILE_MODE10 } ,
{ mmGB_MACROTILE_MODE11 } ,
{ mmGB_MACROTILE_MODE12 } ,
{ mmGB_MACROTILE_MODE13 } ,
{ mmGB_MACROTILE_MODE14 } ,
{ mmGB_MACROTILE_MODE15 } ,
{ mmCC_RB_BACKEND_DISABLE , true } ,
{ mmGC_USER_RB_BACKEND_DISABLE , true } ,
{ mmGB_BACKEND_MAP , false } ,
{ mmPA_SC_RASTER_CONFIG , true } ,
{ mmPA_SC_RASTER_CONFIG_1 , true } ,
2015-04-20 17:31:14 -04:00
} ;
2016-10-10 12:05:32 -04:00
/*
 * Return the value of a whitelisted register.  GRBM-indexed registers are
 * served from the cached per-SE/SH rb_config where possible, or read under
 * grbm_idx_mutex with the requested SE/SH selected; config registers are
 * served from the cached gfx config instead of touching the hardware.
 */
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
/*
 * Look the register up in the whitelist and fetch its value; -EINVAL when
 * the register is not whitelisted.  *value is zeroed on failure.
 */
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}
2016-06-06 13:06:45 +08:00
static int vi_gpu_pci_config_reset ( struct amdgpu_device * adev )
2015-04-20 17:31:14 -04:00
{
2015-10-14 09:39:37 -04:00
u32 i ;
2015-04-20 17:31:14 -04:00
dev_info ( adev - > dev , " GPU pci config reset \n " ) ;
/* disable BM */
pci_clear_master ( adev - > pdev ) ;
/* reset */
2017-12-14 16:22:53 -05:00
amdgpu_device_pci_config_reset ( adev ) ;
2015-04-20 17:31:14 -04:00
udelay ( 100 ) ;
/* wait for asic to come out of reset */
for ( i = 0 ; i < adev - > usec_timeout ; i + + ) {
2016-06-06 13:50:18 +08:00
if ( RREG32 ( mmCONFIG_MEMSIZE ) ! = 0xffffffff ) {
/* enable BM */
pci_set_master ( adev - > pdev ) ;
2017-02-10 15:59:59 +08:00
adev - > has_hw_reset = true ;
2016-06-06 13:06:45 +08:00
return 0 ;
2016-06-06 13:50:18 +08:00
}
2015-04-20 17:31:14 -04:00
udelay ( 1 ) ;
}
2016-06-06 13:06:45 +08:00
return - EINVAL ;
2015-04-20 17:31:14 -04:00
}
/**
* vi_asic_reset - soft reset GPU
*
* @ adev : amdgpu_device pointer
*
* Look up which blocks are hung and attempt
* to reset them .
* Returns 0 for success .
*/
static int vi_asic_reset ( struct amdgpu_device * adev )
{
2016-06-06 13:06:45 +08:00
int r ;
2016-10-21 15:45:22 -04:00
amdgpu_atombios_scratch_regs_engine_hung ( adev , true ) ;
2015-04-20 17:31:14 -04:00
2016-06-06 13:06:45 +08:00
r = vi_gpu_pci_config_reset ( adev ) ;
2015-04-20 17:31:14 -04:00
2016-10-21 15:45:22 -04:00
amdgpu_atombios_scratch_regs_engine_hung ( adev , false ) ;
2015-04-20 17:31:14 -04:00
2016-06-06 13:06:45 +08:00
return r ;
2015-04-20 17:31:14 -04:00
}
2017-03-03 17:26:10 -05:00
/* Report the memory size from the CONFIG_MEMSIZE register. */
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}
2015-04-20 17:31:14 -04:00
/*
 * Program one UVD clock: look up the dividers in atombios, write the
 * post divider to cntl_reg, then poll status_reg until the clock reports
 * ready (APUs use a different status bit and keep the DIR_CNTL_EN field).
 * Returns 0 on success, -ETIMEDOUT if the status bit never sets.
 */
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)	/* APU clock-ready bit */
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}
2018-04-10 17:17:22 +08:00
# define ixGNB_CLK1_DFS_CNTL 0xD82200F0
# define ixGNB_CLK1_STATUS 0xD822010C
# define ixGNB_CLK2_DFS_CNTL 0xD8220110
# define ixGNB_CLK2_STATUS 0xD822012C
2018-04-10 17:49:56 +08:00
# define ixGNB_CLK3_DFS_CNTL 0xD8220130
# define ixGNB_CLK3_STATUS 0xD822014C
2018-04-10 17:17:22 +08:00
2015-04-20 17:31:14 -04:00
static int vi_set_uvd_clocks ( struct amdgpu_device * adev , u32 vclk , u32 dclk )
{
int r ;
2018-04-10 17:17:22 +08:00
if ( adev - > flags & AMD_IS_APU ) {
r = vi_set_uvd_clock ( adev , vclk , ixGNB_CLK2_DFS_CNTL , ixGNB_CLK2_STATUS ) ;
if ( r )
return r ;
2015-04-20 17:31:14 -04:00
2018-04-10 17:17:22 +08:00
r = vi_set_uvd_clock ( adev , dclk , ixGNB_CLK1_DFS_CNTL , ixGNB_CLK1_STATUS ) ;
if ( r )
return r ;
} else {
r = vi_set_uvd_clock ( adev , vclk , ixCG_VCLK_CNTL , ixCG_VCLK_STATUS ) ;
if ( r )
return r ;
r = vi_set_uvd_clock ( adev , dclk , ixCG_DCLK_CNTL , ixCG_DCLK_STATUS ) ;
if ( r )
return r ;
}
2015-04-20 17:31:14 -04:00
return 0 ;
}
/*
 * Program the VCE ecclk: wait for the clock status to settle, write the
 * atombios-provided post divider, then wait again for the status bit.
 * APUs use the GNB CLK3 registers; dGPUs use the CG_ECLK SMC registers.
 * Note: evclk is accepted for interface symmetry but not programmed here.
 */
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
static void vi_pcie_gen3_enable ( struct amdgpu_device * adev )
{
2015-10-06 09:38:45 -04:00
if ( pci_is_root_bus ( adev - > pdev - > bus ) )
return ;
2015-04-20 17:31:14 -04:00
if ( amdgpu_pcie_gen2 = = 0 )
return ;
2015-07-22 11:29:01 +08:00
if ( adev - > flags & AMD_IS_APU )
2015-04-20 17:31:14 -04:00
return ;
2015-11-11 19:45:06 -05:00
if ( ! ( adev - > pm . pcie_gen_mask & ( CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 ) ) )
2015-04-20 17:31:14 -04:00
return ;
/* todo */
}
/* Program ASPM; gated on the amdgpu_aspm module parameter, body still a stub. */
static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
/* Toggle the BIF doorbell aperture (no-op on APUs). */
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
2015-10-08 16:31:43 -04:00
# define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
# define ATI_REV_ID_FUSE_MACRO__SHIFT 9
# define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
2015-04-20 17:31:14 -04:00
static uint32_t vi_get_rev_id ( struct amdgpu_device * adev )
{
2015-11-20 11:40:53 +08:00
if ( adev - > flags & AMD_IS_APU )
2015-10-08 16:31:43 -04:00
return ( RREG32_SMC ( ATI_REV_ID_FUSE_MACRO__ADDRESS ) & ATI_REV_ID_FUSE_MACRO__MASK )
> > ATI_REV_ID_FUSE_MACRO__SHIFT ;
2015-04-20 17:31:14 -04:00
else
2015-11-20 11:40:53 +08:00
return ( RREG32 ( mmPCIE_EFUSE4 ) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK )
> > PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT ;
2015-04-20 17:31:14 -04:00
}
2018-01-19 14:17:40 +01:00
static void vi_flush_hdp ( struct amdgpu_device * adev , struct amdgpu_ring * ring )
2017-09-06 18:06:24 -04:00
{
2018-01-19 14:17:40 +01:00
if ( ! ring | | ! ring - > funcs - > emit_wreg ) {
WREG32 ( mmHDP_MEM_COHERENCY_FLUSH_CNTL , 1 ) ;
RREG32 ( mmHDP_MEM_COHERENCY_FLUSH_CNTL ) ;
} else {
amdgpu_ring_emit_wreg ( ring , mmHDP_MEM_COHERENCY_FLUSH_CNTL , 1 ) ;
}
2017-09-06 18:06:24 -04:00
}
2018-01-19 14:17:40 +01:00
static void vi_invalidate_hdp ( struct amdgpu_device * adev ,
struct amdgpu_ring * ring )
2017-09-06 18:06:24 -04:00
{
2018-01-19 14:17:40 +01:00
if ( ! ring | | ! ring - > funcs - > emit_wreg ) {
WREG32 ( mmHDP_DEBUG0 , 1 ) ;
RREG32 ( mmHDP_DEBUG0 ) ;
} else {
amdgpu_ring_emit_wreg ( ring , mmHDP_DEBUG0 , 1 ) ;
}
2017-09-06 18:06:24 -04:00
}
2018-03-29 14:39:28 -05:00
static bool vi_need_full_reset ( struct amdgpu_device * adev )
{
switch ( adev - > asic_type ) {
case CHIP_CARRIZO :
case CHIP_STONEY :
/* CZ has hang issues with full reset at the moment */
return false ;
case CHIP_FIJI :
case CHIP_TONGA :
/* XXX: soft reset should work on fiji and tonga */
return true ;
case CHIP_POLARIS10 :
case CHIP_POLARIS11 :
case CHIP_POLARIS12 :
case CHIP_TOPAZ :
default :
/* change this when we support soft reset */
return true ;
}
}
2015-04-20 17:31:14 -04:00
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
. read_disabled_bios = & vi_read_disabled_bios ,
2015-11-24 10:37:54 -05:00
. read_bios_from_rom = & vi_read_bios_from_rom ,
2015-04-20 17:31:14 -04:00
. read_register = & vi_read_register ,
. reset = & vi_asic_reset ,
. set_vga_state = & vi_vga_set_state ,
. get_xclk = & vi_get_xclk ,
. set_uvd_clocks = & vi_set_uvd_clocks ,
. set_vce_clocks = & vi_set_vce_clocks ,
2017-03-03 17:26:10 -05:00
. get_config_memsize = & vi_get_config_memsize ,
2017-09-06 18:06:24 -04:00
. flush_hdp = & vi_flush_hdp ,
. invalidate_hdp = & vi_invalidate_hdp ,
2018-03-29 14:39:28 -05:00
. need_full_reset = & vi_need_full_reset ,
2018-11-19 09:51:20 -06:00
. init_doorbell_index = & legacy_doorbell_index_init ,
2015-04-20 17:31:14 -04:00
} ;
2016-08-12 13:47:08 -04:00
/* Bristol Ridge is the Carrizo variant in these external rev-id ranges. */
#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
2015-05-22 14:39:35 -04:00
/**
 * vi_common_early_init - early init for the VI "common" IP block
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Hooks up the indirect register accessors, installs the ASIC callback
 * table, reads the revision id and then fills in the per-ASIC clock-
 * and power-gating support flags plus the external revision id.
 *
 * Returns 0 on success, -EINVAL for an unknown ASIC type.
 */
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* APUs (CZ/ST) use the CZ SMC register accessors, dGPUs the VI ones */
	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	/* 0xFF = "not set"; overwritten per ASIC below */
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		/* clock gating intentionally disabled for now; the flag set
		 * below is kept as a reference for when it gets enabled.
		 */
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* under SR-IOV, apply VF settings and wire up the mailbox irqs */
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
2017-01-12 15:22:18 +08:00
/* Late init: under SR-IOV, request the host/guest mailbox interrupt. */
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}
2015-05-22 14:39:35 -04:00
/* SW init: under SR-IOV, register the mailbox interrupt source id. */
static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}
2015-05-22 14:39:35 -04:00
/* SW fini: nothing to tear down for the VI common block. */
static int vi_common_sw_fini(void *handle)
{
	return 0;
}
2015-05-22 14:39:35 -04:00
static int vi_common_hw_init ( void * handle )
2015-04-20 17:31:14 -04:00
{
2015-05-22 14:39:35 -04:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2015-04-20 17:31:14 -04:00
/* move the golden regs per IP block */
vi_init_golden_registers ( adev ) ;
/* enable pcie gen2/3 link */
vi_pcie_gen3_enable ( adev ) ;
/* enable aspm */
vi_program_aspm ( adev ) ;
/* enable the doorbell aperture */
vi_enable_doorbell_aperture ( adev , true ) ;
return 0 ;
}
2015-05-22 14:39:35 -04:00
static int vi_common_hw_fini ( void * handle )
2015-04-20 17:31:14 -04:00
{
2015-05-22 14:39:35 -04:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2015-04-20 17:31:14 -04:00
/* enable the doorbell aperture */
vi_enable_doorbell_aperture ( adev , false ) ;
2017-01-18 12:50:14 +08:00
if ( amdgpu_sriov_vf ( adev ) )
xgpu_vi_mailbox_put_irq ( adev ) ;
2015-04-20 17:31:14 -04:00
return 0 ;
}
2015-05-22 14:39:35 -04:00
/* Suspend is just a hw_fini — there is no extra VI-common state to save. */
static int vi_common_suspend(void *handle)
{
	return vi_common_hw_fini(handle);
}
2015-05-22 14:39:35 -04:00
/* Resume is just a hw_init — there is no extra VI-common state to restore. */
static int vi_common_resume(void *handle)
{
	return vi_common_hw_init(handle);
}
2015-05-22 14:39:35 -04:00
/* The common block has no busy state to report; always idle. */
static bool vi_common_is_idle(void *handle)
{
	return true;
}
2015-05-22 14:39:35 -04:00
/* Nothing to wait for — see vi_common_is_idle(). */
static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}
2015-05-22 14:39:35 -04:00
/* No soft-reset procedure for the common block. */
static int vi_common_soft_reset(void *handle)
{
	return 0;
}
2016-04-08 01:37:44 -04:00
static void vi_update_bif_medium_grain_light_sleep ( struct amdgpu_device * adev ,
bool enable )
2015-11-12 16:59:47 -05:00
{
uint32_t temp , data ;
temp = data = RREG32_PCIE ( ixPCIE_CNTL2 ) ;
2016-04-08 00:52:58 -04:00
if ( enable & & ( adev - > cg_flags & AMD_CG_SUPPORT_BIF_LS ) )
2015-11-12 16:59:47 -05:00
data | = PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK ;
else
data & = ~ ( PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK ) ;
if ( temp ! = data )
WREG32_PCIE ( ixPCIE_CNTL2 , data ) ;
}
2016-04-08 01:37:44 -04:00
static void vi_update_hdp_medium_grain_clock_gating ( struct amdgpu_device * adev ,
bool enable )
2015-11-12 16:59:47 -05:00
{
uint32_t temp , data ;
temp = data = RREG32 ( mmHDP_HOST_PATH_CNTL ) ;
2016-04-08 00:52:58 -04:00
if ( enable & & ( adev - > cg_flags & AMD_CG_SUPPORT_HDP_MGCG ) )
2015-11-12 16:59:47 -05:00
data & = ~ HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK ;
else
data | = HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK ;
if ( temp ! = data )
WREG32 ( mmHDP_HOST_PATH_CNTL , data ) ;
}
2016-04-08 01:37:44 -04:00
static void vi_update_hdp_light_sleep ( struct amdgpu_device * adev ,
bool enable )
2015-11-12 16:59:47 -05:00
{
uint32_t temp , data ;
temp = data = RREG32 ( mmHDP_MEM_POWER_LS ) ;
2016-04-08 00:52:58 -04:00
if ( enable & & ( adev - > cg_flags & AMD_CG_SUPPORT_HDP_LS ) )
2015-11-12 16:59:47 -05:00
data | = HDP_MEM_POWER_LS__LS_ENABLE_MASK ;
else
data & = ~ HDP_MEM_POWER_LS__LS_ENABLE_MASK ;
if ( temp ! = data )
WREG32 ( mmHDP_MEM_POWER_LS , data ) ;
}
2016-12-08 10:58:15 +08:00
static void vi_update_drm_light_sleep ( struct amdgpu_device * adev ,
bool enable )
{
uint32_t temp , data ;
temp = data = RREG32 ( 0x157a ) ;
if ( enable & & ( adev - > cg_flags & AMD_CG_SUPPORT_DRM_LS ) )
data | = 1 ;
else
data & = ~ 1 ;
if ( temp ! = data )
WREG32 ( 0x157a , data ) ;
}
2016-04-08 01:37:44 -04:00
static void vi_update_rom_medium_grain_clock_gating ( struct amdgpu_device * adev ,
bool enable )
2015-11-12 16:59:47 -05:00
{
uint32_t temp , data ;
temp = data = RREG32_SMC ( ixCGTT_ROM_CLK_CTRL0 ) ;
2016-04-08 00:52:58 -04:00
if ( enable & & ( adev - > cg_flags & AMD_CG_SUPPORT_ROM_MGCG ) )
2015-11-12 16:59:47 -05:00
data & = ~ ( CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK ) ;
else
data | = CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK ;
if ( temp ! = data )
WREG32_SMC ( ixCGTT_ROM_CLK_CTRL0 , data ) ;
}
2016-09-18 16:54:00 +08:00
static int vi_common_set_clockgating_state_by_smu ( void * handle ,
enum amd_clockgating_state state )
{
2016-12-07 19:11:49 +08:00
uint32_t msg_id , pp_state = 0 ;
uint32_t pp_support_state = 0 ;
2016-09-18 16:54:00 +08:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2016-12-07 19:11:49 +08:00
if ( adev - > cg_flags & ( AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG ) ) {
if ( adev - > cg_flags & AMD_CG_SUPPORT_MC_LS ) {
2018-07-10 16:51:22 -05:00
pp_support_state = PP_STATE_SUPPORT_LS ;
2016-12-07 19:11:49 +08:00
pp_state = PP_STATE_LS ;
}
if ( adev - > cg_flags & AMD_CG_SUPPORT_MC_MGCG ) {
2018-07-10 16:51:22 -05:00
pp_support_state | = PP_STATE_SUPPORT_CG ;
2016-12-07 19:11:49 +08:00
pp_state | = PP_STATE_CG ;
}
if ( state = = AMD_CG_STATE_UNGATE )
pp_state = 0 ;
msg_id = PP_CG_MSG_ID ( PP_GROUP_SYS ,
PP_BLOCK_SYS_MC ,
pp_support_state ,
pp_state ) ;
2017-09-26 13:39:38 +08:00
if ( adev - > powerplay . pp_funcs - > set_clockgating_by_smu )
amdgpu_dpm_set_clockgating_by_smu ( adev , msg_id ) ;
2016-12-07 19:11:49 +08:00
}
if ( adev - > cg_flags & ( AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG ) ) {
if ( adev - > cg_flags & AMD_CG_SUPPORT_SDMA_LS ) {
2018-07-10 16:51:22 -05:00
pp_support_state = PP_STATE_SUPPORT_LS ;
2016-12-07 19:11:49 +08:00
pp_state = PP_STATE_LS ;
}
if ( adev - > cg_flags & AMD_CG_SUPPORT_SDMA_MGCG ) {
2018-07-10 16:51:22 -05:00
pp_support_state | = PP_STATE_SUPPORT_CG ;
2016-12-07 19:11:49 +08:00
pp_state | = PP_STATE_CG ;
}
if ( state = = AMD_CG_STATE_UNGATE )
pp_state = 0 ;
msg_id = PP_CG_MSG_ID ( PP_GROUP_SYS ,
PP_BLOCK_SYS_SDMA ,
pp_support_state ,
pp_state ) ;
2017-09-26 13:39:38 +08:00
if ( adev - > powerplay . pp_funcs - > set_clockgating_by_smu )
amdgpu_dpm_set_clockgating_by_smu ( adev , msg_id ) ;
2016-12-07 19:11:49 +08:00
}
if ( adev - > cg_flags & ( AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG ) ) {
if ( adev - > cg_flags & AMD_CG_SUPPORT_HDP_LS ) {
2018-07-10 16:51:22 -05:00
pp_support_state = PP_STATE_SUPPORT_LS ;
2016-12-07 19:11:49 +08:00
pp_state = PP_STATE_LS ;
}
if ( adev - > cg_flags & AMD_CG_SUPPORT_HDP_MGCG ) {
2018-07-10 16:51:22 -05:00
pp_support_state | = PP_STATE_SUPPORT_CG ;
2016-12-07 19:11:49 +08:00
pp_state | = PP_STATE_CG ;
}
if ( state = = AMD_CG_STATE_UNGATE )
pp_state = 0 ;
msg_id = PP_CG_MSG_ID ( PP_GROUP_SYS ,
PP_BLOCK_SYS_HDP ,
pp_support_state ,
pp_state ) ;
2017-09-26 13:39:38 +08:00
if ( adev - > powerplay . pp_funcs - > set_clockgating_by_smu )
amdgpu_dpm_set_clockgating_by_smu ( adev , msg_id ) ;
2016-12-07 19:11:49 +08:00
}
2016-09-18 16:54:00 +08:00
2016-12-07 19:11:49 +08:00
if ( adev - > cg_flags & AMD_CG_SUPPORT_BIF_LS ) {
if ( state = = AMD_CG_STATE_UNGATE )
pp_state = 0 ;
else
pp_state = PP_STATE_LS ;
msg_id = PP_CG_MSG_ID ( PP_GROUP_SYS ,
PP_BLOCK_SYS_BIF ,
PP_STATE_SUPPORT_LS ,
pp_state ) ;
2017-09-26 13:39:38 +08:00
if ( adev - > powerplay . pp_funcs - > set_clockgating_by_smu )
amdgpu_dpm_set_clockgating_by_smu ( adev , msg_id ) ;
2016-12-07 19:11:49 +08:00
}
if ( adev - > cg_flags & AMD_CG_SUPPORT_BIF_MGCG ) {
if ( state = = AMD_CG_STATE_UNGATE )
pp_state = 0 ;
else
pp_state = PP_STATE_CG ;
msg_id = PP_CG_MSG_ID ( PP_GROUP_SYS ,
PP_BLOCK_SYS_BIF ,
PP_STATE_SUPPORT_CG ,
pp_state ) ;
2017-09-26 13:39:38 +08:00
if ( adev - > powerplay . pp_funcs - > set_clockgating_by_smu )
amdgpu_dpm_set_clockgating_by_smu ( adev , msg_id ) ;
2016-12-07 19:11:49 +08:00
}
if ( adev - > cg_flags & AMD_CG_SUPPORT_DRM_LS ) {
if ( state = = AMD_CG_STATE_UNGATE )
pp_state = 0 ;
else
pp_state = PP_STATE_LS ;
msg_id = PP_CG_MSG_ID ( PP_GROUP_SYS ,
PP_BLOCK_SYS_DRM ,
PP_STATE_SUPPORT_LS ,
pp_state ) ;
2017-09-26 13:39:38 +08:00
if ( adev - > powerplay . pp_funcs - > set_clockgating_by_smu )
amdgpu_dpm_set_clockgating_by_smu ( adev , msg_id ) ;
2016-12-07 19:11:49 +08:00
}
if ( adev - > cg_flags & AMD_CG_SUPPORT_ROM_MGCG ) {
if ( state = = AMD_CG_STATE_UNGATE )
pp_state = 0 ;
else
pp_state = PP_STATE_CG ;
msg_id = PP_CG_MSG_ID ( PP_GROUP_SYS ,
PP_BLOCK_SYS_ROM ,
PP_STATE_SUPPORT_CG ,
pp_state ) ;
2017-09-26 13:39:38 +08:00
if ( adev - > powerplay . pp_funcs - > set_clockgating_by_smu )
amdgpu_dpm_set_clockgating_by_smu ( adev , msg_id ) ;
2016-12-07 19:11:49 +08:00
}
2016-09-18 16:54:00 +08:00
return 0 ;
}
2015-05-22 14:39:35 -04:00
static int vi_common_set_clockgating_state ( void * handle ,
2016-04-08 00:52:58 -04:00
enum amd_clockgating_state state )
2015-04-20 17:31:14 -04:00
{
2015-11-12 16:59:47 -05:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2017-01-23 10:49:33 +08:00
if ( amdgpu_sriov_vf ( adev ) )
return 0 ;
2015-11-12 16:59:47 -05:00
switch ( adev - > asic_type ) {
case CHIP_FIJI :
2016-04-08 01:37:44 -04:00
vi_update_bif_medium_grain_light_sleep ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2016-04-08 01:37:44 -04:00
vi_update_hdp_medium_grain_clock_gating ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2016-04-08 01:37:44 -04:00
vi_update_hdp_light_sleep ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2016-04-08 01:37:44 -04:00
vi_update_rom_medium_grain_clock_gating ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2016-04-08 01:37:44 -04:00
break ;
case CHIP_CARRIZO :
case CHIP_STONEY :
vi_update_bif_medium_grain_light_sleep ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2016-04-08 01:37:44 -04:00
vi_update_hdp_medium_grain_clock_gating ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2016-04-08 01:37:44 -04:00
vi_update_hdp_light_sleep ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2016-12-08 10:58:15 +08:00
vi_update_drm_light_sleep ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2015-11-12 16:59:47 -05:00
break ;
2016-09-18 16:54:00 +08:00
case CHIP_TONGA :
case CHIP_POLARIS10 :
case CHIP_POLARIS11 :
2016-12-14 15:32:28 -05:00
case CHIP_POLARIS12 :
2018-04-11 15:28:28 -05:00
case CHIP_VEGAM :
2016-09-18 16:54:00 +08:00
vi_common_set_clockgating_state_by_smu ( adev , state ) ;
2015-11-12 16:59:47 -05:00
default :
break ;
}
2015-04-20 17:31:14 -04:00
return 0 ;
}
2015-05-22 14:39:35 -04:00
/* Power gating for the common block is handled elsewhere; nothing to do. */
static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
2017-01-05 20:48:06 +08:00
/**
 * vi_common_get_clockgating_state - report currently-active CG features
 * @handle: amdgpu_device pointer
 * @flags: out parameter; AMD_CG_SUPPORT_* bits are OR'ed in for each
 *         feature whose enable bit is actually set in hardware
 *
 * Under SR-IOV the flags are zeroed first (register reads may not be
 * meaningful for a VF).
 */
static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int reg;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	reg = RREG32_PCIE(ixPCIE_CNTL2);
	if (reg & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	reg = RREG32(mmHDP_MEM_POWER_LS);
	if (reg & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG (active when the disable bit is clear) */
	reg = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(reg & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG (active when the soft override is clear) */
	reg = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(reg & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}
2016-10-13 17:41:13 -04:00
/* IP-block callback table for the VI "common" block. */
static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};
2016-10-13 17:41:13 -04:00
/* Version descriptor registered for the VI common block (v1.0.0). */
static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
/**
 * vi_set_ip_blocks - register the IP blocks for a VI-family ASIC
 * @adev: amdgpu device pointer
 *
 * Detects virtualization, installs the SR-IOV virt ops if running as a
 * VF, then registers the per-ASIC set of IP blocks (common, GMC, IH,
 * GFX, SDMA, SMU/powerplay, display, UVD/VCE, and ACP on the APUs).
 * Display is virtual under SR-IOV or when requested, DC when supported,
 * otherwise the legacy DCE block for that ASIC.
 *
 * Returns 0 on success, -EINVAL for an unknown ASIC type.
 */
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		/* media blocks are host-owned under SR-IOV */
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		/* media blocks are host-owned under SR-IOV */
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
2018-11-19 09:51:20 -06:00
/**
 * legacy_doorbell_index_init - fill in the legacy (pre-Vega) doorbell map
 * @adev: amdgpu device pointer
 *
 * Copies the fixed AMDGPU_DOORBELL_* assignments into the per-device
 * doorbell index table used by the rest of the driver.
 */
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}