/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
# include "amdgpu.h"
# include "amdgpu_atombios.h"
# include "nbio_v7_0.h"
2017-11-27 17:29:29 +08:00
# include "nbio/nbio_7_0_default.h"
# include "nbio/nbio_7_0_offset.h"
# include "nbio/nbio_7_0_sh_mask.h"
2019-01-07 06:02:06 -05:00
# include "nbio/nbio_7_0_smn.h"
2017-11-24 12:31:36 +08:00
# include "vega10_enum.h"
2019-04-04 15:47:34 -05:00
# include <uapi/linux/kfd_ioctl.h>
2017-05-04 14:59:54 -04:00
# define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
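/* Map the HDP MEM/REG flush control registers into the MMIO remap window
 * at the KFD_MMIO_REMAP_* offsets defined in kfd_ioctl.h.
 */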
static void nbio_v7_0_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

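/* Read the ATI revision ID strap for device 0 / function 0. */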
static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

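/* Enable or disable frame buffer read/write access through the BIF. */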
static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			     BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

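/* Flush HDP by writing the remapped HDP_MEM_FLUSH_CNTL register, either
 * directly (bypassing the KIQ) or through the given ring.
 */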
static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

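/* Program the doorbell aperture (offset and size) for the selected SDMA instance. */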
static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

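/* Program the MMSCH0 doorbell range used by VCN. */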
static void nbio_v7_0_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

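/* Enable or disable the doorbell aperture in the RCC. */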
static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

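/* No self-ring doorbell aperture programming is done for this NBIO version. */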
static void nbio_v7_0_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
}

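/* Program the doorbell range used by the interrupt handler (IH). */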
static void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

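/* Indirect SYSHUB register access through the SYSHUB_INDEX/SYSHUB_DATA pair. */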
static uint32_t nbio_7_0_read_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t offset)
{
	uint32_t data;

	WREG32_SOC15(NBIO, 0, mmSYSHUB_INDEX, offset);
	data = RREG32_SOC15(NBIO, 0, mmSYSHUB_DATA);

	return data;
}

static void nbio_7_0_write_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t offset,
					  uint32_t data)
{
	WREG32_SOC15(NBIO, 0, mmSYSHUB_INDEX, offset);
	WREG32_SOC15(NBIO, 0, mmSYSHUB_DATA, data);
}

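/* Update medium grain clock gating: NBIF MGCG on LCLK plus SYSHUB MGCG on
 * SOCCLK and SHUBCLK, guarded by AMD_CG_SUPPORT_BIF_MGCG.
 */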
static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	/* NBIF_MGCG_CTRL_LCLK */
	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK;
	else
		data &= ~NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK;

	if (def != data)
		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);

	/* SYSHUB_MGCG_CTRL_SOCCLK */
	def = data = nbio_7_0_read_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SOCCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK;
	else
		data &= ~SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK;

	if (def != data)
		nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SOCCLK, data);

	/* SYSHUB_MGCG_CTRL_SHUBCLK */
	def = data = nbio_7_0_read_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK;
	else
		data &= ~SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK;

	if (def != data)
		nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK, data);
}

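/* Update medium grain light sleep for the PCIE slave/master/replay memories,
 * guarded by AMD_CG_SUPPORT_BIF_LS.
 */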
static void nbio_v7_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

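/* Report which BIF clock gating features are currently enabled in hardware. */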
static void nbio_v7_0_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

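/* Basic interrupt controller setup: program the dummy read page address and
 * the dummy-read / non-snoop overrides.
 */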
static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

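/* Offsets of the HDP flush request/done and PCIE index/data registers used
 * through the nbio callbacks below.
 */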
static u32 nbio_v7_0_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_0_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_0_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_0_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

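/* GPU_HDP_FLUSH_DONE reference/mask bits for each client (CP pipes and the
 * two SDMA engines).
 */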
const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

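/* No additional NBIO register setup is performed at init for this version. */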
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
}

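/* NBIO 7.0 callbacks plugged into the common amdgpu_nbio_funcs interface. */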
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
	.get_rev_id = nbio_v7_0_get_rev_id,
	.mc_access_enable = nbio_v7_0_mc_access_enable,
	.hdp_flush = nbio_v7_0_hdp_flush,
	.get_memsize = nbio_v7_0_get_memsize,
	.sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v7_0_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_0_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_0_get_clockgating_state,
	.ih_control = nbio_v7_0_ih_control,
	.init_registers = nbio_v7_0_init_registers,
	.remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
};