/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"

#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "athub_v2_0.h"

/* XXX Move this macro to navi10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};
#endif
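
/*
 * Interrupt state callback for the VM fault source: apply or clear the
 * protection-fault interrupt enable bits in the sixteen VM_CONTEXT*_CNTL
 * registers of both the MM hub and the GFX hub.
 */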

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;

	bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits[AMDGPU_MMHUB_0];
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits[AMDGPU_GFXHUB_0];
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits[AMDGPU_MMHUB_0];
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits[AMDGPU_GFXHUB_0];
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}
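
/*
 * VM fault handler: decode the faulting address from the IV entry, read and
 * clear the hub's L2 protection fault status (bare metal only) and print a
 * rate-limited report of the fault and the offending process.
 */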

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
			"for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);

		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
}
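
/*
 * Build the GCVM_INVALIDATE_ENG*_REQ value that kicks off a legacy
 * (per-VMID) TLB invalidation for the given vmid and flush type.
 */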

static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
					     uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v10_0_use_invalidate_semaphore - check whether the invalidation
 * semaphore workaround should be used
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}
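
/*
 * Look up the PASID currently mapped to @vmid in the ATC. Returns true and
 * stores the PASID in @p_pasid when the mapping is marked valid.
 */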

static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
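
/*
 * Flush the TLB of a single hub through its MMIO invalidation engine,
 * optionally guarded by the invalidation semaphore workaround.
 */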

static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle. As a workaround, acquire a semaphore before
	 * the invalidation and release it afterwards to avoid entering the
	 * power-gated state in between.
	 */

	/* TODO: still debugging whether the semaphore is also needed for the GFXHUB */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	/* Wait for ACK with a delay.*/
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: still debugging whether the semaphore is also needed for the GFXHUB */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;

	int r;

	/* flush hdp cache */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    adev->in_gpu_reset ||
	    ring->sched.ready == false) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		amdgpu_fence_emit_polling(ring, &seq);
		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret	&& queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle. As a workaround, acquire a semaphore before
	 * the invalidation and release it afterwards to avoid entering the
	 * power-gated state in between.
	 */

	/* TODO: still debugging whether the semaphore is also needed for the GFXHUB */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	/* TODO: still debugging whether the semaphore is also needed for the GFXHUB */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

	return pd_addr;
}
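
/*
 * Emit the register write that updates the IH VMID-to-PASID lookup table
 * for the hub this ring belongs to.
 */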

static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
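
/* Translate an AMDGPU_VM_MTYPE_* flag into the NAVI10 PTE mtype encoding. */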

static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}
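
/*
 * Adjust a PDE for the hardware: convert the address to an MC address and,
 * when further translation is enabled, set the block fragment size (PDB1)
 * or the translate-further bit (PDB0).
 */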

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
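
/*
 * Compute the PTE flags for a mapping: take the execute and mtype bits from
 * the mapping, and turn PRT mappings into snooped, logged system PTEs with
 * the valid bit cleared.
 */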

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}
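
/*
 * Early init: install the GMC and IRQ callbacks and set up the shared and
 * private aperture ranges.
 */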

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	amdgpu_bo_late_init(adev);

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = gfxhub_v2_0_get_fb_location(adev);

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	/* Could aper size report 0 ? */
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_NAVI10:
		case CHIP_NAVI14:
		case CHIP_NAVI12:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		}
	} else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
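
/*
 * Create the GART table: one 8-byte PTE per GPU page, allocated in VRAM and
 * mapped uncached (MTYPE_UC) with the executable bit set.
 */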

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}
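
/*
 * Determine how much VRAM the vbios/pre-OS console framebuffer occupies so
 * it can be reserved as stolen memory; returns 0 if that buffer covers
 * nearly all of VRAM.
 */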

static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
					HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
				4);
	}

	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
		DRM_ERROR("Warning: pre-OS buffer uses most of vram, \
				be aware of gart table overwrite\n");
		return 0;
	}

	return size;
}
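
/*
 * Software init: query VRAM parameters, set up the VM configuration and the
 * fault interrupts, initialize the memory manager and create the GART.
 */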

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v2_0_init(adev);
	mmhub_v2_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (!amdgpu_emu_mode)
		adev->gmc.vram_width = vram_width;
	else
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48 bit), maximum size of Navi10/Navi14/Navi12,
		 * block size 512 (9 bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		break;
	}
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = gfxhub_v2_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v2_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* Flush HDP after it is initialized */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	gfxhub_v2_0_set_fault_enable_default(adev, value);
	mmhub_v2_0_set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v10_0_init_golden_registers(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table accesses.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v2_0_gart_disable(adev);
	mmhub_v2_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v10_0_gart_disable(adev);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10.*/
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10.*/
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = mmhub_v2_0_set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v2_0_get_clockgating(adev, flags);

	athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};