/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

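/*
 * Per-channel UMC error-counter control, control-mask and status register
 * offsets, one entry per UMC channel instance. gmc_v9_0_ecc_interrupt_state()
 * walks these tables to toggle the ECC error interrupt enable bits.
 */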
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};

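/*
 * gmc_v9_0_ecc_interrupt_state - enable/disable the UMC ECC error interrupt
 *
 * Sets or clears the low interrupt-enable bits in every UMC control and
 * control-mask register listed above, depending on the requested IRQ state.
 * On VEGA20 and newer this programming is done by the PSP bootloader instead.
 */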
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

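/*
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * Toggles the protection-fault interrupt enable bits in the VM_CONTEXT*_CNTL
 * registers of all 16 VM contexts on every VM hub.
 */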
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
	default:
		break;
	}

	return 0;
}

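/*
 * gmc_v9_0_process_interrupt - handle a GPUVM page fault interrupt
 *
 * Decodes the faulting address and source hub from the IV entry, filters
 * duplicate retry faults, optionally lets the VM code handle recoverable
 * faults, and rate-limit prints the protection fault status for debugging.
 */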
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

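/*
 * gmc_v9_0_get_invalidate_req - build a VM_INVALIDATE_ENG0_REQ value
 *
 * Returns the register value that invalidates all L1/L2 PTE and PDE cache
 * entries of the given VMID with the requested flush type.
 */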
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(adev->asic_type == CHIP_RAVEN &&
		   adev->rev_id < 0x8 &&
		   adev->pdev->device == 0x15d8)));
}

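/*
 * gmc_v9_0_get_atc_vmid_pasid_mapping_info - look up the PASID mapped to a VMID
 *
 * Reads the ATC VMID-to-PASID mapping register; returns true and the PASID
 * when the mapping is valid.
 */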
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->asic_type == CHIP_VEGA20) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    !adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	do {
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if (vmhub == AMDGPU_GFXHUB_0)
			RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (adev->in_gpu_reset)
		return -EIO;

	if (ring->sched.ready) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->asic_type == CHIP_VEGA20);
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		amdgpu_fence_emit_polling(ring, &seq);
		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

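/*
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * Emits the page directory base update and the VM invalidation request
 * (wrapped in the semaphore acquire/release workaround where needed) as
 * ring commands and returns the pd_addr that was written.
 */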
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

	return pd_addr;
}

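/*
 * gmc_v9_0_emit_pasid_mapping - emit the IH VMID-to-PASID LUT update
 *
 * Writes the PASID into the IH lookup table entry of the given VMID for the
 * ring's hub; mmhub1 has no LUT register and is skipped.
 */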
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

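/* gmc_v9_0_map_mtype - map a UAPI memory type to VEGA10 PTE mtype bits */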
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

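/*
 * gmc_v9_0_get_vm_pde - adjust a PDE address and flags for the hardware
 *
 * Translates VRAM addresses into the MC address space and, when further
 * translation is enabled, sets the block fragment size or translate-further
 * bits for the PDB levels.
 */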
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

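/*
 * gmc_v9_0_get_vm_pte - compute the final PTE flags for a mapping
 *
 * Takes the executable, mtype and PRT bits from the mapping and marks XGMI
 * peer-VRAM mappings on Arcturus as snooped.
 */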
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

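/* Pick the UMC callbacks and RAS channel layout for the current ASIC. */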
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}

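/* Pick the MMHUB callbacks for the current ASIC. */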
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
					adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
		adev->mmhub.funcs->reset_ras_error_count(adev);

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

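/*
 * gmc_v9_0_vram_gtt_location - place VRAM, GART and AGP in the GPU address space
 *
 * Uses the MMHUB FB location (plus the XGMI node offset) as the VRAM base and
 * lets the common GMC helpers position the GART and AGP apertures around it.
 */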
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

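/* Allocate the GART page table in VRAM and set its default PTE flags. */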
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

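/*
 * gmc_v9_0_get_vbios_fb_size - size of the framebuffer reserved by the VBIOS
 *
 * Estimates how much VRAM the pre-OS console/VBIOS framebuffer occupies
 * based on the VGA and viewport registers so it can stay reserved.
 */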
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

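/*
 * gmc_v9_0_sw_init - software init for the GMC block
 *
 * Initializes the hubs, queries the VRAM parameters, sets up the VM address
 * space per ASIC, registers the fault/ECC interrupt sources, and creates the
 * GART table and VM manager.
 */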
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
		 * and DF related registers are not readable, seems hardcode is the
		 * only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

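/* Program the MMHUB/ATHUB golden register settings for the current ASIC. */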
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;
	u32 tmp;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		if (adev->asic_type != CHIP_ARCTURUS) {
			/* Lockout access through VGA aperture*/
			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

			/* disable VGA render */
			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		}
	}

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	case CHIP_ARCTURUS:
		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
		break;
	default:
		break;
	}

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		gfxhub_v1_0_set_fault_enable_default(adev, value);
		if (adev->asic_type == CHIP_ARCTURUS)
			mmhub_v9_4_set_fault_enable_default(adev, value);
		else
			mmhub_v1_0_set_fault_enable_default(adev, value);
	}

	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_fini(adev);
	if (r)
		return r;

	gmc_v9_0_save_registers(adev);

	return 0;
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_restore_registers(adev);
	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};