2015-04-20 17:31:14 -04:00
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
2019-06-10 00:07:57 +02:00
2015-04-20 17:31:14 -04:00
# include <linux/firmware.h>
2019-06-10 00:07:57 +02:00
# include <linux/module.h>
# include <linux/pci.h>
2018-02-09 10:44:09 +08:00
# include <drm/drm_cache.h>
2015-04-20 17:31:14 -04:00
# include "amdgpu.h"
# include "gmc_v8_0.h"
# include "amdgpu_ucode.h"
2018-07-11 22:32:49 -04:00
# include "amdgpu_amdkfd.h"
2018-08-13 11:41:35 -05:00
# include "amdgpu_gem.h"
2015-04-20 17:31:14 -04:00
# include "gmc/gmc_8_1_d.h"
# include "gmc/gmc_8_1_sh_mask.h"
# include "bif/bif_5_0_d.h"
# include "bif/bif_5_0_sh_mask.h"
# include "oss/oss_3_0_d.h"
# include "oss/oss_3_0_sh_mask.h"
2017-07-24 23:05:20 -04:00
# include "dce/dce_10_0_d.h"
# include "dce/dce_10_0_sh_mask.h"
2015-04-20 17:31:14 -04:00
# include "vid.h"
# include "vi.h"
2017-03-31 20:14:33 -04:00
# include "amdgpu_atombios.h"
2018-05-25 10:06:52 -04:00
# include "ivsrcid/ivsrcid_vislands30.h"
2018-01-12 15:26:08 +01:00
static void gmc_v8_0_set_gmc_funcs ( struct amdgpu_device * adev ) ;
2015-04-20 17:31:14 -04:00
static void gmc_v8_0_set_irq_funcs ( struct amdgpu_device * adev ) ;
2016-06-23 23:41:48 -04:00
static int gmc_v8_0_wait_for_idle ( void * handle ) ;
2015-04-20 17:31:14 -04:00
2015-05-13 22:49:04 +08:00
MODULE_FIRMWARE ( " amdgpu/tonga_mc.bin " ) ;
2016-03-14 18:33:29 -04:00
MODULE_FIRMWARE ( " amdgpu/polaris11_mc.bin " ) ;
MODULE_FIRMWARE ( " amdgpu/polaris10_mc.bin " ) ;
2016-12-14 15:32:28 -05:00
MODULE_FIRMWARE ( " amdgpu/polaris12_mc.bin " ) ;
2018-11-28 23:25:41 -05:00
MODULE_FIRMWARE ( " amdgpu/polaris11_k_mc.bin " ) ;
MODULE_FIRMWARE ( " amdgpu/polaris10_k_mc.bin " ) ;
2018-11-22 17:53:00 +08:00
MODULE_FIRMWARE ( " amdgpu/polaris12_k_mc.bin " ) ;
2015-04-20 17:31:14 -04:00
/*
 * Golden register tables: flat {register offset, AND mask, OR value}
 * triples consumed by amdgpu_device_program_register_sequence().
 * The VM_PRT_APERTURE*_LOW_ADDR entries program the maximum low address
 * (0x0fffffff) so the PRT apertures are effectively disabled by default.
 */
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

/* Tonga clock/powergating init: MC memory light-sleep setup */
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

/* Fiji clock/powergating init: MC memory light-sleep setup */
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

/* Also used for POLARIS12 and VEGAM (see gmc_v8_0_init_golden_registers) */
static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

/* Carrizo clock/powergating init: MC memory light-sleep setup */
static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

/* Stoney clock/powergating init: ATC misc CG plus MC memory light sleep */
static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

/* Stoney-only hub/read-group settings for UVD traffic */
static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};
2015-10-08 16:26:41 -04:00
2015-04-20 17:31:14 -04:00
/**
 * gmc_v8_0_init_golden_registers - apply per-ASIC golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the clockgating init and golden register tables defined above
 * for the detected ASIC.  ASICs without a case here keep their
 * hardware/vbios defaults.
 */
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	/* POLARIS12 and VEGAM share the polaris11 golden settings */
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}
2016-12-08 14:53:27 -05:00
/**
 * gmc_v8_0_mc_stop - stop client access while the MC is reprogrammed
 *
 * @adev: amdgpu_device pointer
 *
 * Waits for the MC to go idle, then disables CPU framebuffer access
 * through the BIF and puts the memory controller into blackout mode
 * (if it is not already there).
 */
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}
2016-12-08 14:53:27 -05:00
/**
 * gmc_v8_0_mc_resume - undo gmc_v8_0_mc_stop()
 *
 * @adev: amdgpu_device pointer
 *
 * Takes the memory controller out of blackout mode and re-enables CPU
 * framebuffer read/write access through the BIF.
 */
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}
/**
* gmc_v8_0_init_microcode - load ucode images from disk
*
* @ adev : amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver ( not loaded into hw ) .
* Returns 0 on success , error on failure .
*/
static int gmc_v8_0_init_microcode ( struct amdgpu_device * adev )
{
const char * chip_name ;
char fw_name [ 30 ] ;
int err ;
DRM_DEBUG ( " \n " ) ;
switch ( adev - > asic_type ) {
case CHIP_TONGA :
chip_name = " tonga " ;
break ;
2016-03-14 18:33:29 -04:00
case CHIP_POLARIS11 :
2018-11-28 23:25:41 -05:00
if ( ( ( adev - > pdev - > device = = 0x67ef ) & &
( ( adev - > pdev - > revision = = 0xe0 ) | |
( adev - > pdev - > revision = = 0xe5 ) ) ) | |
( ( adev - > pdev - > device = = 0x67ff ) & &
( ( adev - > pdev - > revision = = 0xcf ) | |
( adev - > pdev - > revision = = 0xef ) | |
( adev - > pdev - > revision = = 0xff ) ) ) )
chip_name = " polaris11_k " ;
else if ( ( adev - > pdev - > device = = 0x67ef ) & &
( adev - > pdev - > revision = = 0xe2 ) )
chip_name = " polaris11_k " ;
else
chip_name = " polaris11 " ;
2016-03-11 14:28:53 -05:00
break ;
2016-03-14 18:33:29 -04:00
case CHIP_POLARIS10 :
2018-11-28 23:25:41 -05:00
if ( ( adev - > pdev - > device = = 0x67df ) & &
( ( adev - > pdev - > revision = = 0xe1 ) | |
( adev - > pdev - > revision = = 0xf7 ) ) )
chip_name = " polaris10_k " ;
else
chip_name = " polaris10 " ;
2016-03-11 14:28:53 -05:00
break ;
2016-12-14 15:32:28 -05:00
case CHIP_POLARIS12 :
2018-11-22 17:53:00 +08:00
if ( ( ( adev - > pdev - > device = = 0x6987 ) & &
( ( adev - > pdev - > revision = = 0xc0 ) | |
( adev - > pdev - > revision = = 0xc3 ) ) ) | |
( ( adev - > pdev - > device = = 0x6981 ) & &
( ( adev - > pdev - > revision = = 0x00 ) | |
( adev - > pdev - > revision = = 0x01 ) | |
2018-11-28 23:25:41 -05:00
( adev - > pdev - > revision = = 0x10 ) ) ) )
2018-11-22 17:53:00 +08:00
chip_name = " polaris12_k " ;
2018-11-28 23:25:41 -05:00
else
chip_name = " polaris12 " ;
2016-12-14 15:32:28 -05:00
break ;
2015-07-08 01:11:52 +08:00
case CHIP_FIJI :
2015-04-20 17:31:14 -04:00
case CHIP_CARRIZO :
2015-10-08 16:26:41 -04:00
case CHIP_STONEY :
2017-11-16 13:15:12 -05:00
case CHIP_VEGAM :
2015-04-20 17:31:14 -04:00
return 0 ;
default : BUG ( ) ;
}
2015-05-13 22:49:04 +08:00
snprintf ( fw_name , sizeof ( fw_name ) , " amdgpu/%s_mc.bin " , chip_name ) ;
2018-01-12 14:52:22 +01:00
err = request_firmware ( & adev - > gmc . fw , fw_name , adev - > dev ) ;
2015-04-20 17:31:14 -04:00
if ( err )
goto out ;
2018-01-12 14:52:22 +01:00
err = amdgpu_ucode_validate ( adev - > gmc . fw ) ;
2015-04-20 17:31:14 -04:00
out :
if ( err ) {
2017-02-28 04:55:52 -08:00
pr_err ( " mc: Failed to load firmware \" %s \" \n " , fw_name ) ;
2018-01-12 14:52:22 +01:00
release_firmware ( adev - > gmc . fw ) ;
adev - > gmc . fw = NULL ;
2015-04-20 17:31:14 -04:00
}
return err ;
}
/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adaptor.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	/* The firmware blob carries an io-debug register list followed by
	 * the ucode proper; both sections are located via header offsets. */
	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	/* io-debug entries are {index, data} pairs, hence / (4 * 2) */
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	/* Only load if the MC sequencer is not already running */
	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete (both memory channels) */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
		/* NOTE(review): a training timeout is not reported to the
		 * caller; the function still returns 0. */
	}

	return 0;
}
2017-03-21 12:51:48 +08:00
/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Polaris variant of the MC ucode loader.  Unlike the tonga path it does
 * not check whether the sequencer is already running and it polls
 * MC_SEQ_MISC0 rather than MC_SEQ_TRAIN_WAKEUP_CNTL for completion.
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adaptor.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	/* io-debug entries are {index, data} pairs, hence / (4 * 2) */
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	/* clear bit 6 of MC_SEQ_MISC0 before loading
	 * (NOTE(review): bit semantics not documented here) */
	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	/* reset the engine and set to writable */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete: MC_SEQ_MISC0 bit 7 set
	 * (NOTE(review): completion bit inferred from the poll below) */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}
2015-04-20 17:31:14 -04:00
/**
 * gmc_v8_0_vram_gtt_location - place VRAM and GART in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: gmc structure whose vram/gart ranges are filled in
 *
 * On bare metal the VRAM base is read back from MC_VM_FB_LOCATION
 * (low 16 bits, in 16 MB units, hence << 24); under an SR-IOV VF the
 * register read is skipped and base stays 0.  The common helpers then
 * lay out the VRAM and GART apertures.
 */
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}
/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP: zero five consecutive registers in each of 32
	 * register groups, stride 0x6 (raw offsets 0xb05..0xb09) */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
	}
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration: system aperture covers VRAM, default
	 * (fault) address points at the scratch page */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		/* VF must program FB_LOCATION itself: top half = end,
		 * bottom half = start, both in 16 MB units */
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	/* disable AGP (BOT > TOP range is empty) */
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
	}

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	/* read-modify-write with no field change; write-back only */
	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* Prefer the vram width reported by atombios; fall back to
	 * deriving it from the MC channel configuration registers. */
	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM informations */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		/* map the NOOFCHAN field encoding to a channel count */
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on si */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	/* dGPUs may support resizing the PCI BAR to cover all of VRAM */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* On APUs the carve-out is directly CPU-accessible; use the MC
	 * FB offset instead of the PCI BAR */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		/* module parameter override, in MB */
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: hub index (unused in this implementation)
 * @flush_type: flush type (unused in this implementation)
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
2018-01-12 19:14:42 +01:00
/**
 * gmc_v8_0_emit_flush_gpu_tlb - ring-based TLB flush
 *
 * @ring: ring to emit the register writes on
 * @vmid: vm instance to flush
 * @pd_addr: new page directory address for that VMID
 *
 * Emits writes that install @pd_addr as the page table base for @vmid
 * and then invalidate that context's TLB.  Returns @pd_addr.
 */
static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	/* contexts 0-7 and 8-15 use two separate register banks */
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}
2018-02-04 10:32:35 +01:00
/* Emit a write that records the vmid -> pasid mapping in the IH LUT */
static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
2019-02-25 12:56:53 -05:00
/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */
2017-11-29 13:27:26 +01:00
/* No PDE transformation needed on VI: the address is used as-is.
 * Only sanity-check that no bits outside the 39:12 base-address field
 * are set (see the PDE format comment above). */
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
2019-09-02 16:39:40 +02:00
/* Adjust the PTE flags for a mapping: take the executable bit from the
 * mapping itself and strip the PRT bit, which is not stored in VI PTEs. */
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	uint64_t pte = *flags & ~(AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_PRT);

	pte |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
	*flags = pte;
}
2015-09-28 12:31:26 +02:00
/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 *
 * Sets all seven ...PROTECTION_FAULT_ENABLE_DEFAULT fields of
 * VM_CONTEXT1_CNTL (range, dummy page, PDE0, valid, read, write,
 * execute) to @value in a single read-modify-write.
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
2017-01-18 13:40:48 +01:00
/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 *
 * Configures VM_PRT_CNTL fault suppression and the four PRT aperture
 * ranges.  When enabling, the apertures span the whole VM address range
 * minus the reserved area at each end; when disabling, the low address
 * is set past the high address so the apertures match nothing.
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	/* one-time warning: PRT disables normal VM fault reporting */
	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		/* aperture bounds are in GPU pages */
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		/* low > high: empty apertures */
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}
2015-04-20 17:31:14 -04:00
/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	/* pin the GART table BO so its VRAM address stays valid while enabled */
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	/* invalidate stale L1/L2 translations before the new table is used */
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	/* fragment size configured by the VM manager (log2 pages) */
	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 - covers the GART aperture, single-level page table */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	/* faults in context0 are redirected to the dummy page */
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* contexts 1-7 and 8-15 live in two separate register banks */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}
	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	/* with FAULT_STOP_ALWAYS, faults halt the client instead of using
	 * the default dummy page, so the default-fault path is disabled */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}
static int gmc_v8_0_gart_init ( struct amdgpu_device * adev )
{
int r ;
2018-08-21 17:07:47 +02:00
if ( adev - > gart . bo ) {
2015-04-20 17:31:14 -04:00
WARN ( 1 , " R600 PCIE GART already initialized \n " ) ;
return 0 ;
}
/* Initialize common gart structure */
r = amdgpu_gart_init ( adev ) ;
if ( r )
return r ;
adev - > gart . table_size = adev - > gart . num_gpu_pages * 8 ;
2017-02-14 12:31:36 -05:00
adev - > gart . gart_pte_flags = AMDGPU_PTE_EXECUTABLE ;
2015-04-20 17:31:14 -04:00
return amdgpu_gart_table_vram_alloc ( adev ) ;
}
/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control: turn the L1 TLB off */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache: turn the L2 cache off */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	/* drop the pin taken in gart_enable */
	amdgpu_gart_table_vram_unpin(adev);
}
/**
* gmc_v8_0_vm_decode_fault - print human readable fault info
*
* @ adev : amdgpu_device pointer
* @ status : VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @ addr : VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
*
2019-05-01 08:22:50 -04:00
* Print human readable fault information ( VI ) .
2015-04-20 17:31:14 -04:00
*/
2018-01-09 19:50:01 +01:00
static void gmc_v8_0_vm_decode_fault ( struct amdgpu_device * adev , u32 status ,
u32 addr , u32 mc_client , unsigned pasid )
2015-04-20 17:31:14 -04:00
{
u32 vmid = REG_GET_FIELD ( status , VM_CONTEXT1_PROTECTION_FAULT_STATUS , VMID ) ;
u32 protections = REG_GET_FIELD ( status , VM_CONTEXT1_PROTECTION_FAULT_STATUS ,
PROTECTIONS ) ;
char block [ 5 ] = { mc_client > > 24 , ( mc_client > > 16 ) & 0xff ,
( mc_client > > 8 ) & 0xff , mc_client & 0xff , 0 } ;
2018-01-09 19:50:01 +01:00
u32 mc_id ;
2015-04-20 17:31:14 -04:00
mc_id = REG_GET_FIELD ( status , VM_CONTEXT1_PROTECTION_FAULT_STATUS ,
MEMORY_CLIENT_ID ) ;
2018-01-09 19:50:01 +01:00
dev_err ( adev - > dev , " VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d) \n " ,
protections , vmid , pasid , addr ,
2015-04-20 17:31:14 -04:00
REG_GET_FIELD ( status , VM_CONTEXT1_PROTECTION_FAULT_STATUS ,
MEMORY_CLIENT_RW ) ?
" write " : " read " , block , mc_client , mc_id ) ;
}
2015-06-03 21:02:01 +08:00
static int gmc_v8_0_convert_vram_type ( int mc_seq_vram_type )
{
switch ( mc_seq_vram_type ) {
case MC_SEQ_MISC0__MT__GDDR1 :
return AMDGPU_VRAM_TYPE_GDDR1 ;
case MC_SEQ_MISC0__MT__DDR2 :
return AMDGPU_VRAM_TYPE_DDR2 ;
case MC_SEQ_MISC0__MT__GDDR3 :
return AMDGPU_VRAM_TYPE_GDDR3 ;
case MC_SEQ_MISC0__MT__GDDR4 :
return AMDGPU_VRAM_TYPE_GDDR4 ;
case MC_SEQ_MISC0__MT__GDDR5 :
return AMDGPU_VRAM_TYPE_GDDR5 ;
case MC_SEQ_MISC0__MT__HBM :
return AMDGPU_VRAM_TYPE_HBM ;
case MC_SEQ_MISC0__MT__DDR3 :
return AMDGPU_VRAM_TYPE_DDR3 ;
default :
return AMDGPU_VRAM_TYPE_UNKNOWN ;
}
}
2015-05-22 14:39:35 -04:00
static int gmc_v8_0_early_init ( void * handle )
2015-04-20 17:31:14 -04:00
{
2015-05-22 14:39:35 -04:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2018-01-12 15:26:08 +01:00
gmc_v8_0_set_gmc_funcs ( adev ) ;
2015-04-20 17:31:14 -04:00
gmc_v8_0_set_irq_funcs ( adev ) ;
2018-01-12 14:52:22 +01:00
adev - > gmc . shared_aperture_start = 0x2000000000000000ULL ;
adev - > gmc . shared_aperture_end =
adev - > gmc . shared_aperture_start + ( 4ULL < < 30 ) - 1 ;
adev - > gmc . private_aperture_start =
adev - > gmc . shared_aperture_end + 1 ;
adev - > gmc . private_aperture_end =
adev - > gmc . private_aperture_start + ( 4ULL < < 30 ) - 1 ;
2016-03-10 14:20:39 +08:00
2015-04-20 17:31:14 -04:00
return 0 ;
}
2015-09-04 18:48:29 +02:00
static int gmc_v8_0_late_init ( void * handle )
{
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2018-04-06 14:54:10 -05:00
amdgpu_bo_late_init ( adev ) ;
2016-04-25 16:06:17 +08:00
if ( amdgpu_vm_fault_stop ! = AMDGPU_VM_FAULT_STOP_ALWAYS )
2018-01-12 14:52:22 +01:00
return amdgpu_irq_get ( adev , & adev - > gmc . vm_fault , 0 ) ;
2016-04-25 16:06:17 +08:00
else
return 0 ;
2015-09-04 18:48:29 +02:00
}
2018-04-06 14:54:09 -05:00
static unsigned gmc_v8_0_get_vbios_fb_size ( struct amdgpu_device * adev )
{
u32 d1vga_control = RREG32 ( mmD1VGA_CONTROL ) ;
unsigned size ;
if ( REG_GET_FIELD ( d1vga_control , D1VGA_CONTROL , D1VGA_MODE_ENABLE ) ) {
size = 9 * 1024 * 1024 ; /* reserve 8MB for vga emulator and 1 MB for FB */
} else {
u32 viewport = RREG32 ( mmVIEWPORT_SIZE ) ;
size = ( REG_GET_FIELD ( viewport , VIEWPORT_SIZE , VIEWPORT_HEIGHT ) *
REG_GET_FIELD ( viewport , VIEWPORT_SIZE , VIEWPORT_WIDTH ) *
4 ) ;
}
/* return 0 if the pre-OS buffer uses up most of vram */
if ( ( adev - > gmc . real_vram_size - size ) < ( 8 * 1024 * 1024 ) )
return 0 ;
return size ;
}
2016-03-31 16:41:32 -04:00
# define mmMC_SEQ_MISC0_FIJI 0xA71
2015-05-22 14:39:35 -04:00
/*
 * Software init: detect VRAM type, register the VM fault interrupt
 * sources, size the VM space, set up DMA masks, load MC firmware,
 * initialize the memory manager and the GART, and allocate the KFD
 * fault-info buffer. Returns 0 on success or a negative error code.
 */
static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		/* APUs use carve-out system memory; no discrete VRAM type */
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		/* Fiji/VegaM expose MC_SEQ_MISC0 at a different offset */
		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	/* both page-invalidation and memory-protection faults feed the
	 * same vm_fault interrupt source */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("amdgpu: No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		/* MC_VM_FB_OFFSET holds the carve-out base in 4MB units */
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	/* scratch buffer KFD reads VM fault details from */
	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}
2015-05-22 14:39:35 -04:00
/*
 * Software teardown: release GEM objects, the VM manager, the KFD
 * fault-info buffer, the GART table and bookkeeping, the buffer
 * manager, and the MC firmware — in reverse order of sw_init.
 */
static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	/* release_firmware(NULL) is a no-op, so this is safe even if
	 * init_microcode never loaded anything */
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}
2015-05-22 14:39:35 -04:00
static int gmc_v8_0_hw_init ( void * handle )
2015-04-20 17:31:14 -04:00
{
int r ;
2015-05-22 14:39:35 -04:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2015-04-20 17:31:14 -04:00
gmc_v8_0_init_golden_registers ( adev ) ;
gmc_v8_0_mc_program ( adev ) ;
2016-02-02 10:57:30 -05:00
if ( adev - > asic_type = = CHIP_TONGA ) {
2017-03-21 12:51:48 +08:00
r = gmc_v8_0_tonga_mc_load_microcode ( adev ) ;
if ( r ) {
DRM_ERROR ( " Failed to load MC firmware! \n " ) ;
return r ;
}
} else if ( adev - > asic_type = = CHIP_POLARIS11 | |
adev - > asic_type = = CHIP_POLARIS10 | |
adev - > asic_type = = CHIP_POLARIS12 ) {
r = gmc_v8_0_polaris_mc_load_microcode ( adev ) ;
2015-04-20 17:31:14 -04:00
if ( r ) {
DRM_ERROR ( " Failed to load MC firmware! \n " ) ;
return r ;
}
}
r = gmc_v8_0_gart_enable ( adev ) ;
if ( r )
return r ;
return r ;
}
2015-05-22 14:39:35 -04:00
/*
 * Hardware teardown: drop the VM fault interrupt reference and
 * disable the GART/VM page tables. Always succeeds.
 */
static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}
2015-05-22 14:39:35 -04:00
/* Suspend is just a full hardware teardown; state is reprogrammed on resume. */
static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}
2015-05-22 14:39:35 -04:00
/*
 * Resume: reprogram the hardware, then invalidate all VMIDs since any
 * translations cached before suspend are stale.
 */
static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
2015-05-22 14:39:35 -04:00
static bool gmc_v8_0_is_idle ( void * handle )
2015-04-20 17:31:14 -04:00
{
2015-05-22 14:39:35 -04:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2015-04-20 17:31:14 -04:00
u32 tmp = RREG32 ( mmSRBM_STATUS ) ;
if ( tmp & ( SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK ) )
return false ;
return true ;
}
2015-05-22 14:39:35 -04:00
static int gmc_v8_0_wait_for_idle ( void * handle )
2015-04-20 17:31:14 -04:00
{
unsigned i ;
u32 tmp ;
2015-05-22 14:39:35 -04:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2015-04-20 17:31:14 -04:00
for ( i = 0 ; i < adev - > usec_timeout ; i + + ) {
/* read MC_STATUS */
tmp = RREG32 ( mmSRBM_STATUS ) & ( SRBM_STATUS__MCB_BUSY_MASK |
SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK |
SRBM_STATUS__MCD_BUSY_MASK |
SRBM_STATUS__VMC_BUSY_MASK |
SRBM_STATUS__VMC1_BUSY_MASK ) ;
if ( ! tmp )
return 0 ;
udelay ( 1 ) ;
}
return - ETIMEDOUT ;
}
2016-10-13 16:07:03 -04:00
static bool gmc_v8_0_check_soft_reset ( void * handle )
2015-04-20 17:31:14 -04:00
{
u32 srbm_soft_reset = 0 ;
2015-05-22 14:39:35 -04:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2015-04-20 17:31:14 -04:00
u32 tmp = RREG32 ( mmSRBM_STATUS ) ;
if ( tmp & SRBM_STATUS__VMC_BUSY_MASK )
srbm_soft_reset = REG_SET_FIELD ( srbm_soft_reset ,
SRBM_SOFT_RESET , SOFT_RESET_VMC , 1 ) ;
if ( tmp & ( SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK ) ) {
2015-07-22 11:29:01 +08:00
if ( ! ( adev - > flags & AMD_IS_APU ) )
2015-04-20 17:31:14 -04:00
srbm_soft_reset = REG_SET_FIELD ( srbm_soft_reset ,
SRBM_SOFT_RESET , SOFT_RESET_MC , 1 ) ;
}
if ( srbm_soft_reset ) {
2018-01-12 14:52:22 +01:00
adev - > gmc . srbm_soft_reset = srbm_soft_reset ;
2016-10-13 16:07:03 -04:00
return true ;
2016-07-18 16:59:24 +08:00
} else {
2018-01-12 14:52:22 +01:00
adev - > gmc . srbm_soft_reset = 0 ;
2016-10-13 16:07:03 -04:00
return false ;
2016-07-18 16:59:24 +08:00
}
}
2015-04-20 17:31:14 -04:00
2016-07-18 16:59:24 +08:00
static int gmc_v8_0_pre_soft_reset ( void * handle )
{
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2018-01-12 14:52:22 +01:00
if ( ! adev - > gmc . srbm_soft_reset )
2016-07-18 16:59:24 +08:00
return 0 ;
2016-12-08 14:53:27 -05:00
gmc_v8_0_mc_stop ( adev ) ;
2016-07-18 16:59:24 +08:00
if ( gmc_v8_0_wait_for_idle ( adev ) ) {
dev_warn ( adev - > dev , " Wait for GMC idle timed out ! \n " ) ;
}
return 0 ;
}
2015-04-20 17:31:14 -04:00
2016-07-18 16:59:24 +08:00
static int gmc_v8_0_soft_reset ( void * handle )
{
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
u32 srbm_soft_reset ;
2018-01-12 14:52:22 +01:00
if ( ! adev - > gmc . srbm_soft_reset )
2016-07-18 16:59:24 +08:00
return 0 ;
2018-01-12 14:52:22 +01:00
srbm_soft_reset = adev - > gmc . srbm_soft_reset ;
2016-07-18 16:59:24 +08:00
if ( srbm_soft_reset ) {
u32 tmp ;
2015-04-20 17:31:14 -04:00
tmp = RREG32 ( mmSRBM_SOFT_RESET ) ;
tmp | = srbm_soft_reset ;
dev_info ( adev - > dev , " SRBM_SOFT_RESET=0x%08X \n " , tmp ) ;
WREG32 ( mmSRBM_SOFT_RESET , tmp ) ;
tmp = RREG32 ( mmSRBM_SOFT_RESET ) ;
udelay ( 50 ) ;
tmp & = ~ srbm_soft_reset ;
WREG32 ( mmSRBM_SOFT_RESET , tmp ) ;
tmp = RREG32 ( mmSRBM_SOFT_RESET ) ;
/* Wait a little for things to settle down */
udelay ( 50 ) ;
}
return 0 ;
}
2016-07-18 16:59:24 +08:00
/*
 * After a GMC soft reset: restart the memory controller that
 * pre_soft_reset stopped. A no-op when no reset was flagged.
 */
static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}
2015-04-20 17:31:14 -04:00
/*
 * Enable or disable the VM protection-fault interrupt bits in both
 * the system context (context0) and the user contexts (context1-15,
 * controlled via VM_CONTEXT1_CNTL). Unknown states are ignored.
 */
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		WREG32(mmVM_CONTEXT0_CNTL, RREG32(mmVM_CONTEXT0_CNTL) & ~bits);
		/* VMs */
		WREG32(mmVM_CONTEXT1_CNTL, RREG32(mmVM_CONTEXT1_CNTL) & ~bits);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		WREG32(mmVM_CONTEXT0_CNTL, RREG32(mmVM_CONTEXT0_CNTL) | bits);
		/* VMs */
		WREG32(mmVM_CONTEXT1_CNTL, RREG32(mmVM_CONTEXT1_CNTL) | bits);
		break;
	default:
		break;
	}

	return 0;
}
/*
 * VM fault interrupt handler: read and clear the fault registers,
 * print a (rate-limited) human-readable report, and publish the fault
 * details to amdkfd when the faulting VMID belongs to KFD.
 */
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	/* under SRIOV the fault registers are owned by the host */
	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	/* spurious interrupt: both registers already clear */
	if (!addr && !status)
		return 0;

	/* FAULT_STOP_FIRST: stop redirecting to the dummy page after the
	 * first fault so the offending state can be inspected */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	/* hand the fault off to amdkfd if it came from a KFD VMID; skip if
	 * a previous fault is still unread (vm_fault_info_updated set) */
	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		/* ensure the info fields are visible before the flag */
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}
2015-11-10 11:27:39 -05:00
static void fiji_update_mc_medium_grain_clock_gating ( struct amdgpu_device * adev ,
2016-04-08 01:01:18 -04:00
bool enable )
2015-11-10 11:27:39 -05:00
{
uint32_t data ;
2016-04-08 01:01:18 -04:00
if ( enable & & ( adev - > cg_flags & AMD_CG_SUPPORT_MC_MGCG ) ) {
2015-11-10 11:27:39 -05:00
data = RREG32 ( mmMC_HUB_MISC_HUB_CG ) ;
data | = MC_HUB_MISC_HUB_CG__ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_HUB_CG , data ) ;
data = RREG32 ( mmMC_HUB_MISC_SIP_CG ) ;
data | = MC_HUB_MISC_SIP_CG__ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_SIP_CG , data ) ;
data = RREG32 ( mmMC_HUB_MISC_VM_CG ) ;
data | = MC_HUB_MISC_VM_CG__ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_VM_CG , data ) ;
data = RREG32 ( mmMC_XPB_CLK_GAT ) ;
data | = MC_XPB_CLK_GAT__ENABLE_MASK ;
WREG32 ( mmMC_XPB_CLK_GAT , data ) ;
data = RREG32 ( mmATC_MISC_CG ) ;
data | = ATC_MISC_CG__ENABLE_MASK ;
WREG32 ( mmATC_MISC_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_WR_CG ) ;
data | = MC_CITF_MISC_WR_CG__ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_WR_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_RD_CG ) ;
data | = MC_CITF_MISC_RD_CG__ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_RD_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_VM_CG ) ;
data | = MC_CITF_MISC_VM_CG__ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_VM_CG , data ) ;
data = RREG32 ( mmVM_L2_CG ) ;
data | = VM_L2_CG__ENABLE_MASK ;
WREG32 ( mmVM_L2_CG , data ) ;
} else {
data = RREG32 ( mmMC_HUB_MISC_HUB_CG ) ;
data & = ~ MC_HUB_MISC_HUB_CG__ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_HUB_CG , data ) ;
data = RREG32 ( mmMC_HUB_MISC_SIP_CG ) ;
data & = ~ MC_HUB_MISC_SIP_CG__ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_SIP_CG , data ) ;
data = RREG32 ( mmMC_HUB_MISC_VM_CG ) ;
data & = ~ MC_HUB_MISC_VM_CG__ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_VM_CG , data ) ;
data = RREG32 ( mmMC_XPB_CLK_GAT ) ;
data & = ~ MC_XPB_CLK_GAT__ENABLE_MASK ;
WREG32 ( mmMC_XPB_CLK_GAT , data ) ;
data = RREG32 ( mmATC_MISC_CG ) ;
data & = ~ ATC_MISC_CG__ENABLE_MASK ;
WREG32 ( mmATC_MISC_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_WR_CG ) ;
data & = ~ MC_CITF_MISC_WR_CG__ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_WR_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_RD_CG ) ;
data & = ~ MC_CITF_MISC_RD_CG__ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_RD_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_VM_CG ) ;
data & = ~ MC_CITF_MISC_VM_CG__ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_VM_CG , data ) ;
data = RREG32 ( mmVM_L2_CG ) ;
data & = ~ VM_L2_CG__ENABLE_MASK ;
WREG32 ( mmVM_L2_CG , data ) ;
}
}
static void fiji_update_mc_light_sleep ( struct amdgpu_device * adev ,
2016-04-08 01:01:18 -04:00
bool enable )
2015-11-10 11:27:39 -05:00
{
uint32_t data ;
2016-04-08 01:01:18 -04:00
if ( enable & & ( adev - > cg_flags & AMD_CG_SUPPORT_MC_LS ) ) {
2015-11-10 11:27:39 -05:00
data = RREG32 ( mmMC_HUB_MISC_HUB_CG ) ;
data | = MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_HUB_CG , data ) ;
data = RREG32 ( mmMC_HUB_MISC_SIP_CG ) ;
data | = MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_SIP_CG , data ) ;
data = RREG32 ( mmMC_HUB_MISC_VM_CG ) ;
data | = MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_VM_CG , data ) ;
data = RREG32 ( mmMC_XPB_CLK_GAT ) ;
data | = MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_XPB_CLK_GAT , data ) ;
data = RREG32 ( mmATC_MISC_CG ) ;
data | = ATC_MISC_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmATC_MISC_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_WR_CG ) ;
data | = MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_WR_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_RD_CG ) ;
data | = MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_RD_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_VM_CG ) ;
data | = MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_VM_CG , data ) ;
data = RREG32 ( mmVM_L2_CG ) ;
data | = VM_L2_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmVM_L2_CG , data ) ;
} else {
data = RREG32 ( mmMC_HUB_MISC_HUB_CG ) ;
data & = ~ MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_HUB_CG , data ) ;
data = RREG32 ( mmMC_HUB_MISC_SIP_CG ) ;
data & = ~ MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_SIP_CG , data ) ;
data = RREG32 ( mmMC_HUB_MISC_VM_CG ) ;
data & = ~ MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_HUB_MISC_VM_CG , data ) ;
data = RREG32 ( mmMC_XPB_CLK_GAT ) ;
data & = ~ MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_XPB_CLK_GAT , data ) ;
data = RREG32 ( mmATC_MISC_CG ) ;
data & = ~ ATC_MISC_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmATC_MISC_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_WR_CG ) ;
data & = ~ MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_WR_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_RD_CG ) ;
data & = ~ MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_RD_CG , data ) ;
data = RREG32 ( mmMC_CITF_MISC_VM_CG ) ;
data & = ~ MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmMC_CITF_MISC_VM_CG , data ) ;
data = RREG32 ( mmVM_L2_CG ) ;
data & = ~ VM_L2_CG__MEM_LS_ENABLE_MASK ;
WREG32 ( mmVM_L2_CG , data ) ;
}
}
2015-05-22 14:39:35 -04:00
static int gmc_v8_0_set_clockgating_state ( void * handle ,
enum amd_clockgating_state state )
2015-04-20 17:31:14 -04:00
{
2015-11-10 11:27:39 -05:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2017-01-23 10:49:33 +08:00
if ( amdgpu_sriov_vf ( adev ) )
return 0 ;
2015-11-10 11:27:39 -05:00
switch ( adev - > asic_type ) {
case CHIP_FIJI :
fiji_update_mc_medium_grain_clock_gating ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2015-11-10 11:27:39 -05:00
fiji_update_mc_light_sleep ( adev ,
2017-03-15 11:20:23 -05:00
state = = AMD_CG_STATE_GATE ) ;
2015-11-10 11:27:39 -05:00
break ;
default :
break ;
}
2015-04-20 17:31:14 -04:00
return 0 ;
}
2015-05-22 14:39:35 -04:00
/**
 * gmc_v8_0_set_powergating_state - power gating hook (no-op)
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 * @state: requested power gating state (ignored)
 *
 * GMC v8 implements no power gating; the request is accepted and
 * 0 is returned unconditionally.
 */
static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
2017-01-05 20:03:27 +08:00
/**
 * gmc_v8_0_get_clockgating_state - report currently enabled MC clock gating
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 * @flags: output bitmask; AMD_CG_SUPPORT_* bits are OR'ed in for each
 *         feature found enabled in hardware
 *
 * Both MGCG and LS status are derived from a single read of
 * mmMC_HUB_MISC_HUB_CG.
 */
static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* Under SR-IOV the host owns clock gating: start from an empty mask. */
	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32(mmMC_HUB_MISC_HUB_CG);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
2016-10-13 17:41:13 -04:00
static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
2016-05-04 14:28:35 -04:00
. name = " gmc_v8_0 " ,
2015-04-20 17:31:14 -04:00
. early_init = gmc_v8_0_early_init ,
2015-09-04 18:48:29 +02:00
. late_init = gmc_v8_0_late_init ,
2015-04-20 17:31:14 -04:00
. sw_init = gmc_v8_0_sw_init ,
. sw_fini = gmc_v8_0_sw_fini ,
. hw_init = gmc_v8_0_hw_init ,
. hw_fini = gmc_v8_0_hw_fini ,
. suspend = gmc_v8_0_suspend ,
. resume = gmc_v8_0_resume ,
. is_idle = gmc_v8_0_is_idle ,
. wait_for_idle = gmc_v8_0_wait_for_idle ,
2016-07-18 16:59:24 +08:00
. check_soft_reset = gmc_v8_0_check_soft_reset ,
. pre_soft_reset = gmc_v8_0_pre_soft_reset ,
2015-04-20 17:31:14 -04:00
. soft_reset = gmc_v8_0_soft_reset ,
2016-07-18 16:59:24 +08:00
. post_soft_reset = gmc_v8_0_post_soft_reset ,
2015-04-20 17:31:14 -04:00
. set_clockgating_state = gmc_v8_0_set_clockgating_state ,
. set_powergating_state = gmc_v8_0_set_powergating_state ,
2017-01-05 20:03:27 +08:00
. get_clockgating_state = gmc_v8_0_get_clockgating_state ,
2015-04-20 17:31:14 -04:00
} ;
2018-01-12 15:26:08 +01:00
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
. flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb ,
2018-01-12 19:14:42 +01:00
. emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb ,
2018-02-04 10:32:35 +01:00
. emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping ,
2017-01-18 13:40:48 +01:00
. set_prt = gmc_v8_0_set_prt ,
2019-09-02 16:39:40 +02:00
. get_vm_pde = gmc_v8_0_get_vm_pde ,
. get_vm_pte = gmc_v8_0_get_vm_pte
2015-04-20 17:31:14 -04:00
} ;
/* VM fault interrupt source: enable/disable control and fault handler. */
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};
2018-01-12 15:26:08 +01:00
static void gmc_v8_0_set_gmc_funcs ( struct amdgpu_device * adev )
2015-04-20 17:31:14 -04:00
{
2018-09-17 15:41:45 +02:00
adev - > gmc . gmc_funcs = & gmc_v8_0_gmc_funcs ;
2015-04-20 17:31:14 -04:00
}
static void gmc_v8_0_set_irq_funcs ( struct amdgpu_device * adev )
{
2018-01-12 14:52:22 +01:00
adev - > gmc . vm_fault . num_types = 1 ;
adev - > gmc . vm_fault . funcs = & gmc_v8_0_irq_funcs ;
2015-04-20 17:31:14 -04:00
}
2016-10-13 17:41:13 -04:00
/* GMC 8.0 IP block descriptor. */
const struct amdgpu_ip_block_version gmc_v8_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};
/* GMC 8.1 IP block descriptor; shares the 8.0 callback table. */
const struct amdgpu_ip_block_version gmc_v8_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};
/* GMC 8.5 IP block descriptor; shares the 8.0 callback table. */
const struct amdgpu_ip_block_version gmc_v8_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};