From b8fe56375f78835db47565d91ea9d21767fe3c08 Mon Sep 17 00:00:00 2001 From: Leon Huang Date: Tue, 8 Nov 2022 16:29:13 +0800 Subject: [PATCH 001/861] drm/amd/display: Refactor ABM feature [Why] Refactor ABM feature and implement inbox command for DMUB. [How] Implement the ioctl to send inbox command to DMUB. Reviewed-by: Rodrigo Siqueira Signed-off-by: Leon Huang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 294 +++++++----------- .../gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 286 +++++++++++++++++ .../gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h | 46 +++ drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 6 + 5 files changed, 457 insertions(+), 177 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index 0d7db132a20f..01490c9ba958 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile @@ -29,7 +29,7 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ -dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o \ +dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dmub_abm_lcd.o dce_panel_cntl.o \ dmub_hw_lock_mgr.o dmub_outbox.o AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 9fc48208c2e4..a66f83a61402 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -24,212 +24,151 @@ */ #include "dmub_abm.h" -#include "dce_abm.h" +#include "dmub_abm_lcd.h" #include "dc.h" -#include "dc_dmub_srv.h" -#include "dmub/dmub_srv.h" #include "core_types.h" -#include "dm_services.h" -#include "reg_helper.h" -#include "fixed31_32.h" - -#include "atom.h" #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) -#define REG(reg) \ - (dce_abm->regs->reg) +#define ABM_FEATURE_NO_SUPPORT 0 +#define ABM_LCD_SUPPORT 1 -#undef FN -#define FN(reg_name, field_name) \ - dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name - -#define CTX \ - dce_abm->base.ctx - -#define DISABLE_ABM_IMMEDIATELY 255 - - - -static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) +static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst) { - union dmub_rb_cmd cmd; - uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 
1 : 0; - uint32_t edp_id_count = dc->dc_edp_id_count; - int i; - uint8_t panel_mask = 0; - - for (i = 0; i < edp_id_count; i++) - panel_mask |= 0x01 << i; - - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM; - cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC; - cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm; - cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; - cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); - - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); -} - -static void dmub_abm_init(struct abm *abm, uint32_t backlight) -{ - struct dce_abm *dce_abm = TO_DMUB_ABM(abm); - - REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3); - REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1); - REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3); - REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1); - REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1); - - REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0, - ABM1_HG_NUM_OF_BINS_SEL, 0, - ABM1_HG_VMAX_SEL, 1, - ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0); - - REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0, - ABM1_IPCSC_COEFF_SEL_R, 2, - ABM1_IPCSC_COEFF_SEL_G, 4, - ABM1_IPCSC_COEFF_SEL_B, 2); - - REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL, - BL1_PWM_CURRENT_ABM_LEVEL, backlight); - - REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL, - BL1_PWM_TARGET_ABM_LEVEL, backlight); - - REG_UPDATE(BL1_PWM_USER_LEVEL, - BL1_PWM_USER_LEVEL, backlight); - - REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, - ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, - ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000); - - REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0, - ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1, - ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1, - ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); - - dmub_abm_enable_fractional_pwm(abm->ctx); -} - -static unsigned int dmub_abm_get_current_backlight(struct abm *abm) -{ - struct dce_abm *dce_abm = TO_DMUB_ABM(abm); - unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); - - /* return backlight in hardware format which is unsigned 17 bits, with - * 1 bit integer and 16 bit fractional - */ - return backlight; -} - -static unsigned int dmub_abm_get_target_backlight(struct abm *abm) -{ - struct dce_abm *dce_abm = TO_DMUB_ABM(abm); - unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); - - /* return backlight in hardware format which is unsigned 17 bits, with - * 1 bit integer and 16 bit fractional - */ - return backlight; -} - -static bool dmub_abm_set_level(struct abm *abm, uint32_t level) -{ - union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; struct dc_link *edp_links[MAX_NUM_EDP]; int i; int edp_num; - uint8_t panel_mask = 0; + unsigned int ret = ABM_FEATURE_NO_SUPPORT; dc_get_edp_links(dc->dc, edp_links, &edp_num); for (i = 0; i < edp_num; i++) { - if (edp_links[i]->link_status.link_active) - panel_mask |= (0x01 << i); + if (panel_inst == i) + break; } - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_set_level.header.type = DMUB_CMD__ABM; - cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL; - cmd.abm_set_level.abm_set_level_data.level = level; - cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; - cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); + if (i < edp_num) { + ret = ABM_LCD_SUPPORT; + } - 
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); - - return true; + return ret; } -static bool dmub_abm_init_config(struct abm *abm, +static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight) +{ + dmub_abm_init(abm, backlight); +} + +static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm) +{ + return dmub_abm_get_current_backlight(abm); +} + +static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm) +{ + return dmub_abm_get_target_backlight(abm); +} + +static bool dmub_abm_set_level_ex(struct abm *abm, uint32_t level) +{ + bool ret = false; + unsigned int feature_support, i; + uint8_t panel_mask0 = 0; + + for (i = 0; i < MAX_NUM_EDP; i++) { + feature_support = abm_feature_support(abm, i); + + if (feature_support == ABM_LCD_SUPPORT) + panel_mask0 |= (0x01 << i); + } + + if (panel_mask0) + ret = dmub_abm_set_level(abm, level, panel_mask0); + + return ret; +} + +static bool dmub_abm_init_config_ex(struct abm *abm, const char *src, unsigned int bytes, unsigned int inst) { - union dmub_rb_cmd cmd; - struct dc_context *dc = abm->ctx; - uint8_t panel_mask = 0x01 << inst; + unsigned int feature_support; - // TODO: Optimize by only reading back final 4 bytes - dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + feature_support = abm_feature_support(abm, inst); - // Copy iramtable into cw7 - memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes); - - memset(&cmd, 0, sizeof(cmd)); - // Fw will copy from cw7 to fw_state - cmd.abm_init_config.header.type = DMUB_CMD__ABM; - cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG; - cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; - cmd.abm_init_config.abm_init_config_data.bytes = bytes; - cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask; - - cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); - - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + if (feature_support == ABM_LCD_SUPPORT) + dmub_abm_init_config(abm, src, bytes, inst); return true; } -static bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) +static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) { - union dmub_rb_cmd cmd; - struct dc_context *dc = abm->ctx; - uint8_t panel_mask = 0x01 << panel_inst; + bool ret = false; + unsigned int feature_support; - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_pause.header.type = DMUB_CMD__ABM; - cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE; - cmd.abm_pause.abm_pause_data.enable = pause; - cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; - cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); + feature_support = abm_feature_support(abm, panel_inst); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_set_pause(abm, pause, panel_inst, stream_inst); - return true; + return ret; +} + +static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) +{ + bool ret = false; + unsigned int feature_support; + + feature_support = 
abm_feature_support(abm, panel_inst); + + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst); + + return ret; +} + +static bool dmub_abm_set_event_ex(struct abm *abm, unsigned int full_screen, unsigned int video_mode, + unsigned int hdr_mode, unsigned int panel_inst) +{ + bool ret = false; + unsigned int feature_support; + + feature_support = abm_feature_support(abm, panel_inst); + + return ret; +} + +static bool dmub_abm_set_backlight_level_pwm_ex(struct abm *abm, + unsigned int backlight_pwm_u16_16, + unsigned int frame_ramp, + unsigned int controller_id, + unsigned int panel_inst) +{ + bool ret = false; + unsigned int feature_support; + + feature_support = abm_feature_support(abm, panel_inst); + + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_set_backlight_level(abm, backlight_pwm_u16_16, frame_ramp, panel_inst); + + return ret; } static const struct abm_funcs abm_funcs = { - .abm_init = dmub_abm_init, - .set_abm_level = dmub_abm_set_level, - .get_current_backlight = dmub_abm_get_current_backlight, - .get_target_backlight = dmub_abm_get_target_backlight, - .init_abm_config = dmub_abm_init_config, - .set_abm_pause = dmub_abm_set_pause, + .abm_init = dmub_abm_init_ex, + .set_abm_level = dmub_abm_set_level_ex, + .get_current_backlight = dmub_abm_get_current_backlight_ex, + .get_target_backlight = dmub_abm_get_target_backlight_ex, + .init_abm_config = dmub_abm_init_config_ex, + .set_abm_pause = dmub_abm_set_pause_ex, + .set_pipe_ex = dmub_abm_set_pipe_ex, + .set_abm_event = dmub_abm_set_event_ex, + .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex, }; static void dmub_abm_construct( @@ -256,16 +195,19 @@ struct abm *dmub_abm_create( const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { - struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); + if (ctx->dc->caps.dmcub_support) { + struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); - if (abm_dce == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; + if (abm_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask); + + return &abm_dce->base; } - - dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask); - - return &abm_dce->base; + return NULL; } void dmub_abm_destroy(struct abm **abm) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c new file mode 100644 index 000000000000..4055d271ac57 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -0,0 +1,286 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dmub_abm.h" +#include "dce_abm.h" +#include "dc.h" +#include "dc_dmub_srv.h" +#include "dmub/dmub_srv.h" +#include "core_types.h" +#include "dm_services.h" +#include "reg_helper.h" +#include "fixed31_32.h" + +#ifdef _WIN32 +#include "atombios.h" +#else +#include "atom.h" +#endif + +#define TO_DMUB_ABM(abm)\ + container_of(abm, struct dce_abm, base) + +#define REG(reg) \ + (dce_abm->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name + +#define CTX \ + dce_abm->base.ctx + +#define DISABLE_ABM_IMMEDIATELY 255 + + + +static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) +{ + union dmub_rb_cmd cmd; + uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0; + uint32_t edp_id_count = dc->dc_edp_id_count; + int i; + uint8_t panel_mask = 0; + + for (i = 0; i < edp_id_count; i++) + panel_mask |= 0x01 << i; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM; + cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; + cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + +void dmub_abm_init(struct abm *abm, uint32_t backlight) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + + REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3); + REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1); + REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3); + REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1); + REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1); + + REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0, + ABM1_HG_NUM_OF_BINS_SEL, 0, + ABM1_HG_VMAX_SEL, 1, + ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0); + + REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0, + ABM1_IPCSC_COEFF_SEL_R, 2, + ABM1_IPCSC_COEFF_SEL_G, 4, + ABM1_IPCSC_COEFF_SEL_B, 2); + + REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL, + BL1_PWM_CURRENT_ABM_LEVEL, backlight); + + REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL, + BL1_PWM_TARGET_ABM_LEVEL, backlight); + + REG_UPDATE(BL1_PWM_USER_LEVEL, + BL1_PWM_USER_LEVEL, backlight); + + REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, + ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, + ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000); + + REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0, + ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1, + ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1, + ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); + + dmub_abm_enable_fractional_pwm(abm->ctx); +} + +unsigned int dmub_abm_get_current_backlight(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); + + /* return backlight in hardware format which is unsigned 17 bits, with + * 1 bit integer and 16 bit fractional + */ + return backlight; +} + +unsigned int dmub_abm_get_target_backlight(struct abm *abm) +{ + struct dce_abm *dce_abm = TO_DMUB_ABM(abm); + unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); + + /* return backlight in 
hardware format which is unsigned 17 bits, with + * 1 bit integer and 16 bit fractional + */ + return backlight; +} + +bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_level.header.type = DMUB_CMD__ABM; + cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL; + cmd.abm_set_level.abm_set_level_data.level = level; + cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; + cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + +#ifndef TRIM_AMBIENT_GAMMA +void dmub_abm_set_ambient_level(struct abm *abm, unsigned int ambient_lux, uint8_t panel_mask) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + + if (ambient_lux > 0xFFFF) + ambient_lux = 0xFFFF; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_ambient_level.header.type = DMUB_CMD__ABM; + cmd.abm_set_ambient_level.header.sub_type = DMUB_CMD__ABM_SET_AMBIENT_LEVEL; + cmd.abm_set_ambient_level.abm_set_ambient_level_data.ambient_lux = ambient_lux; + cmd.abm_set_ambient_level.abm_set_ambient_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_ambient_level.abm_set_ambient_level_data.panel_mask = panel_mask; + cmd.abm_set_ambient_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_ambient_level_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} +#endif + +void dmub_abm_init_config(struct abm *abm, + const char *src, + unsigned int bytes, + unsigned int inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + uint8_t panel_mask = 0x01 << inst; + + // TODO: Optimize by only reading back final 4 bytes + dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + + // Copy iramtable into cw7 + memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes); + + memset(&cmd, 0, sizeof(cmd)); + // Fw will copy from cw7 to fw_state + cmd.abm_init_config.header.type = DMUB_CMD__ABM; + cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG; + cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.abm_init_config.abm_init_config_data.bytes = bytes; + cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask; + + cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + +} + +bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + uint8_t panel_mask = 0x01 << panel_inst; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_pause.header.type = DMUB_CMD__ABM; + cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE; + cmd.abm_pause.abm_pause_data.enable = pause; + cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; + cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + 
dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + +bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + uint32_t ramping_boundary = 0xFFFF; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_pipe.header.type = DMUB_CMD__ABM; + cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE; + cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst; + cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option; + cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst; + cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; + cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + +bool dmub_abm_set_backlight_level(struct abm *abm, + unsigned int backlight_pwm_u16_16, + unsigned int frame_ramp, + unsigned int panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; + cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; + cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); + cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h new file mode 100644 index 000000000000..00b4e268768e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h @@ -0,0 +1,46 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DMUB_ABM_LCD_H__ +#define __DMUB_ABM_LCD_H__ + +#include "abm.h" + +void dmub_abm_init(struct abm *abm, uint32_t backlight); +bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask); +unsigned int dmub_abm_get_current_backlight(struct abm *abm); +unsigned int dmub_abm_get_target_backlight(struct abm *abm); +void dmub_abm_init_config(struct abm *abm, + const char *src, + unsigned int bytes, + unsigned int inst); + +bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst); +bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst); +bool dmub_abm_set_backlight_level(struct abm *abm, + unsigned int backlight_pwm_u16_16, + unsigned int frame_ramp, + unsigned int panel_inst); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index ecb4191b6e64..db5cf9acafe6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -55,6 +55,12 @@ struct abm_funcs { unsigned int bytes, unsigned int inst); bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst); + bool (*set_pipe_ex)(struct abm *abm, + unsigned int otg_inst, + unsigned int option, + unsigned int panel_inst); + bool (*set_abm_event)(struct abm *abm, unsigned int full_screen, unsigned int video_mode, + unsigned int hdr_mode, unsigned int panel_inst); }; #endif From e4dfd94d5e3851df607b26ab5b20ad8d94f5ccff Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Mon, 17 Apr 2023 22:30:01 +0530 Subject: [PATCH 002/861] drm/amd/display: Unconditionally print when DP sink power state fails The previous 'commit ca9beb8aac68 ("drm/amd/display: Add logging when setting DP sink power state fails")', it is better to unconditionally print "failed to power up sink", because we are returning DC_ERROR_UNEXPECTED. Fixes: ca9beb8aac68 ("drm/amd/display: Add logging when setting DP sink power state fails") Cc: Aurabindo Pillai Cc: Fangzhi Zuo Signed-off-by: Srinivasan Shanmugam Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/link/protocols/link_dp_capability.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index ba98013fecd0..6d2d10da2b77 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1043,9 +1043,7 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link) DP_SET_POWER, &dpcd_power_state, sizeof(dpcd_power_state)); - if (status < 0) - DC_LOG_DC("%s: Failed to power up sink: %s\n", __func__, - dpcd_power_state == DP_SET_POWER_D0 ? "D0" : "D3"); + DC_LOG_DC("%s: Failed to power up sink\n", __func__); return DC_ERROR_UNEXPECTED; } From 9b035d089086deb75d6664b26d36e35853e58ce9 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Sat, 15 Apr 2023 21:03:16 +0530 Subject: [PATCH 003/861] drm/amd/display: Check & log if receiver supports MST, DSC & FEC. 
After reading from the receiver via DPCD, check and log whether it supports MST, DSC and FEC. Cc: Aurabindo Pillai Cc: Fangzhi Zuo Signed-off-by: Srinivasan Shanmugam Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- .../dc/link/protocols/link_dp_capability.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 6d2d10da2b77..2914fca7dab3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1552,6 +1552,9 @@ static bool retrieve_link_cap(struct dc_link *link) int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; const uint32_t post_oui_delay = 30; // 30ms + bool is_fec_supported = false; + bool is_dsc_basic_supported = false; + bool is_dsc_passthrough_supported = false; memset(dpcd_data, '\0', sizeof(dpcd_data)); memset(&down_strm_port_count, @@ -1694,6 +1697,7 @@ static bool retrieve_link_cap(struct dc_link *link) /* TODO - decouple raw mst capability from policy decision */ link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); + DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable)); get_active_converter_info(ds_port.byte, link); @@ -1801,6 +1805,18 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_SUPPORT, link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); + if (status == DC_OK) { + is_fec_supported = link->dpcd_caps.fec_cap.bits.FEC_CAPABLE; + is_dsc_basic_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT; + is_dsc_passthrough_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT; + DC_LOG_DC("%s: FEC_Sink_Support: %s\n", __func__, + str_yes_no(is_fec_supported)); + DC_LOG_DC("%s: DSC_Basic_Sink_Support: %s\n", __func__, + str_yes_no(is_dsc_basic_supported)); + if (link->dpcd_caps.is_mst_capable) + DC_LOG_DC("%s: DSC_Passthrough_Sink_Support: %s\n", __func__, + str_yes_no(is_dsc_passthrough_supported)); + } if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { status = core_link_read_dpcd( link, From 6f0ef80a00adfd51be22b6ab84acd48de1d3938d Mon Sep 17 00:00:00 2001 From: Leon Huang Date: Thu, 8 Dec 2022 16:06:07 +0800 Subject: [PATCH 004/861] drm/amd/display: Fix ABM pipe/backlight issues when changing backlight [Why] Setting the ABM pipe/backlight hits issues when the ABM callback function pointers are NULL. For some use cases, the driver needs to control the PWM level before the ABM resource is ready; however, the recent refactor of the ABM flow didn't consider that use case. [How] Roll back to sending the inbox command to DMUB directly when the ABM function pointers aren't ready.
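To make the fallback concrete, below is a standalone model of the call shape the dcn21_hwseq.c hunks in this patch implement. The struct and helper names in this sketch are invented stand-ins; in the driver the guarded hook is abm->funcs->set_pipe_ex and the fallback is a local dmub_abm_set_pipe() that builds the inbox command itself.

/* Illustrative model only, not driver code: dispatch through the ops
 * table when the hook is wired up, otherwise fall back to the direct
 * DMUB inbox path. All names here are invented for the sketch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct abm_model;

struct abm_model_funcs {
	bool (*set_pipe_ex)(struct abm_model *abm, uint32_t otg_inst,
			    uint32_t option, uint32_t panel_inst);
};

struct abm_model {
	const struct abm_model_funcs *funcs;
};

/* Stands in for the legacy direct-inbox path (dmub_abm_set_pipe). */
static bool inbox_set_pipe(struct abm_model *abm, uint32_t otg_inst,
			   uint32_t option, uint32_t panel_inst)
{
	(void)abm;
	printf("direct inbox cmd: otg=%u option=%u panel=%u\n",
	       otg_inst, option, panel_inst);
	return true;
}

static bool set_pipe(struct abm_model *abm, uint32_t otg_inst,
		     uint32_t option, uint32_t panel_inst)
{
	/* Use the refactored hook when ABM is fully constructed... */
	if (abm->funcs && abm->funcs->set_pipe_ex)
		return abm->funcs->set_pipe_ex(abm, otg_inst, option, panel_inst);
	/* ...otherwise fall back to the direct inbox command. */
	return inbox_set_pipe(abm, otg_inst, option, panel_inst);
}

int main(void)
{
	/* Early in boot the ops table is not wired up yet. */
	struct abm_model early = { .funcs = NULL };

	return set_pipe(&early, 0, 255, 0) ? 0 : 1;
}

The same guard-then-fallback shape is repeated for the backlight path, so PWM writes keep working even when the ABM construct path has not run yet.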
Reviewed-by: Rodrigo Siqueira Signed-off-by: Leon Huang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn21/dcn21_hwseq.c | 64 ++++++++++++++----- 1 file changed, 47 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 2a182c2f57d6..1c6477d73c8e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -159,6 +159,25 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio return true; } +static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm_u16_16, + uint32_t frame_ramp, uint32_t panel_inst) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; + cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; + cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); + cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) { struct abm *abm = pipe_ctx->stream_res.abm; @@ -173,8 +192,12 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) } if (abm && panel_cntl) { - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, - panel_cntl->inst); + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, + panel_cntl->inst); + } else { + dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst); + } panel_cntl->funcs->store_backlight_level(panel_cntl); } } @@ -191,8 +214,13 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) return; } - if (abm && panel_cntl) - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + if (abm && panel_cntl) { + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + } else { + dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + } + } } bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, @@ -210,21 +238,23 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, return true; } - if (abm && panel_cntl) - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + if (abm != NULL) { + uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; - cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; - cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; - cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; - cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst); - cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + if (abm && panel_cntl) { + if (abm->funcs && abm->funcs->set_pipe_ex) { + 
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + } else { + dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); + } + } + } - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm) + abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16, + frame_ramp, 0, panel_cntl->inst); + else + dmub_abm_set_backlight(dc, backlight_pwm_u16_16, frame_ramp, panel_cntl->inst); return true; } From e101bf95ea87ccc03ac2f48dfc0757c6364ff3c7 Mon Sep 17 00:00:00 2001 From: Wesley Chalmers Date: Thu, 3 Nov 2022 22:29:31 -0400 Subject: [PATCH 005/861] drm/amd/display: Do not set drr on pipe commit [WHY] Writing to DRR registers such as OTG_V_TOTAL_MIN on the same frame as a pipe commit can cause underflow. [HOW] Move DMUB p-state delegate into optimze_bandwidth; enabling FAMS sets optimized_required. This change expects that Freesync requests are blocked when optimized_required is true. Reviewed-by: Rodrigo Siqueira Signed-off-by: Wesley Chalmers Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 7 +++++++ 2 files changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 5403e9399a46..6ce10fd4bb1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2113,6 +2113,12 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + dc_dmub_srv_p_state_delegate(dc, + true, context); + context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + } + dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 0e071fbc9154..0411867654dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -985,11 +985,18 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context) { + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + dc->optimized_required = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + } + if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); + + dc_dmub_srv_p_state_delegate(dc, false, context); } From 751e17147953bc30036b8fe0eaaf780b6951404c Mon Sep 17 00:00:00 2001 From: Wesley Chalmers Date: Tue, 28 Feb 2023 13:48:00 -0500 Subject: [PATCH 006/861] drm/amd/display: Block optimize on consecutive FAMS enables [WHY] It is possible to commit state multiple times in rapid succession with FAMS enabled; if each of these commits were to set optimized_required, then the user may see latency. 
[HOW] fw_based_mclk_switching is currently not used in dc->clk_mgr; use it to track whether the current state has FAMS enabled; if it has, then do not disable FAMS in prepare_bandwidth, and do not set optimized_required. Reviewed-by: Rodrigo Siqueira Signed-off-by: Wesley Chalmers Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn20/dcn20_hwseq.c | 3 +++ .../drm/amd/display/dc/dcn30/dcn30_hwseq.c | 22 ++++++++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 6ce10fd4bb1a..422fbf79da64 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2117,6 +2117,9 @@ void dcn20_optimize_bandwidth( dc_dmub_srv_p_state_delegate(dc, true, context); context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + dc->clk_mgr->clks.fw_based_mclk_switching = true; + } else { + dc->clk_mgr->clks.fw_based_mclk_switching = false; } dc->clk_mgr->funcs->update_clocks( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 0411867654dd..8263a07f265f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -983,9 +983,13 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; + /* Any transition into an FPO config should disable MCLK switching first to avoid + * driver and FW P-State synchronization issues. + */ + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { dc->optimized_required = true; context->bw_ctx.bw.dcn.clk.p_state_change_support = false; } @@ -996,7 +1000,19 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); + /* + * enabled -> enabled: do not disable + * enabled -> disabled: disable + * disabled -> enabled: don't care + * disabled -> disabled: don't care + */ + if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); - dc_dmub_srv_p_state_delegate(dc, false, context); + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + /* After disabling P-State, restore the original value to ensure we get the correct P-State + * on the next optimize. */ + context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; + } } From e0a77e09c707cf89317de00f87b94b1168f27acd Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 4 Apr 2023 14:54:05 -0600 Subject: [PATCH 007/861] drm/amd/display: Add missing WA and MCLK validation When the commit fff7eb56b376 ("drm/amd/display: Don't set dram clock change requirement for SubVP") was merged, we missed some parts associated with the MCLK switch. This commit adds all the missing parts. 
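As an illustration of the workaround added to dcn30_fpu.c below, here is a self-contained toy model of the state-selection loop. The 1500-1700 MT/s strobe range and the "num_chans <= 4" condition come from the hunk; the state table and values are made up for the sketch.

/* Toy model, not driver code: when FPO is active on a narrow (<= 4
 * channel) memory config and the DRAM speed lands in the strobe
 * range, promote it to the first non-strobe state. */
#include <stdio.h>

int main(void)
{
	const int states[] = { 1200, 1600, 2400, 3200 };	/* dram_speed_mts table */
	const int num_states = sizeof(states) / sizeof(states[0]);
	int dram_speed = 1600;	/* current DRAMSpeed, inside the strobe range */
	const int num_chans = 4;

	if (num_chans <= 4 && dram_speed >= 1500 && dram_speed <= 1700) {
		for (int i = 0; i < num_states; i++) {
			if (states[i] > 1700) {
				dram_speed = states[i];	/* first non-strobe mode */
				break;
			}
		}
	}
	printf("selected DRAM speed: %d MT/s\n", dram_speed);
	return 0;
}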
Fixes: fff7eb56b376 ("drm/amd/display: Don't set dram clock change requirement for SubVP") Reviewed-by: Aurabindo Pillai Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 1 + .../drm/amd/display/dc/dcn32/dcn32_resource.c | 2 +- .../drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 18 +++++++++++++++++- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index db0974fe58ab..1f5ee5cde6e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -948,6 +948,7 @@ void dcn32_init_hw(struct dc *dc) if (dc->ctx->dmub_srv) { dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 0beb11d95eb7..a876e6eb6cd8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -2023,7 +2023,7 @@ int dcn32_populate_dml_pipes_from_context( // In general cases we want to keep the dram clock change requirement // (prefer configs that support MCLK switch). Only override to false // for SubVP - if (subvp_in_use) + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use) context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false; else context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index 80972ee5e55b..a352c703e258 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -368,7 +368,9 @@ void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) dc_assert_fp_enabled(); if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) { - context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; + if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || + context->bw_ctx.dml.soc.dram_clock_change_latency_us == 0) + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; } @@ -563,6 +565,20 @@ void dcn30_fpu_calculate_wm_and_dlg( pipe_idx++; } + // WA: restrict FPO to use first non-strobe mode (NV24 BW issue) + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && + dc->dml.soc.num_chans <= 4 && + context->bw_ctx.dml.vba.DRAMSpeed <= 1700 && + context->bw_ctx.dml.vba.DRAMSpeed >= 1500) { + + for (i = 0; i < dc->dml.soc.num_states; i++) { + if (dc->dml.soc.clock_limits[i].dram_speed_mts > 1700) { + context->bw_ctx.dml.vba.DRAMSpeed = dc->dml.soc.clock_limits[i].dram_speed_mts; + break; + } + } + } + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); if (!pstate_en) From d63e31f66892f67d8f1e279c57c4c0aee789fc66 Mon Sep 17 00:00:00 2001 
From: Josip Pavic Date: Fri, 14 May 2021 14:04:02 -0400 Subject: [PATCH 008/861] drm/amd/display: copy dmub caps to dc on dcn31 [Why & How] Add code path to copy dmub caps to dc, which is missing on dcn31 Acked-by: Qingqing Zhuo Signed-off-by: Josip Pavic Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 7ac6e69cff37..62ce36c75c4d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -295,6 +295,10 @@ void dcn31_init_hw(struct dc *dc) if (dc->res_pool->hubbub->funcs->init_crb) dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); #endif + + // Get DMCUB capabilities + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; } void dcn31_dsc_pg_control( From 4ad3ee5ccc77aa3f9d702f7b9ad4d9cfeca6c443 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Wed, 15 Mar 2023 19:09:15 +0800 Subject: [PATCH 009/861] drm/amd/display: allow edp updates for virtual signal [Why] When IGT's kms_hdmi_inject forces EDID for HDMI audio, dc rejects the request because virtual signal is not in dc_is_audio_capable_signal function. [How] Includes SIGNAL_TYPE_VIRTUAL as audio capable. Reviewed-by: Chao-kai Wang Acked-by: Qingqing Zhuo Signed-off-by: Alex Hung Signed-off-by: Wenchieh Chien Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/signal_types.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index beed70179bb5..23a308c3eccb 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -104,6 +104,7 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal) { return (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + signal == SIGNAL_TYPE_VIRTUAL || dc_is_hdmi_signal(signal)); } From 785b250e33c7b1a9dcdb262eac691cd33ac7a53a Mon Sep 17 00:00:00 2001 From: Alan Liu Date: Wed, 22 Mar 2023 13:43:28 +0800 Subject: [PATCH 010/861] drm/amd/display: Fix in disabling secure display [Why] Currently we don't check if secure display is enabled before we send command to disable secure display in dmub. It will accidentally cause some other igt tests to fail, eg, crtc-linear-degamma. [How] Refactor the code we reset the secure display state to check secure display was enabled or not before stopping it in dmub. 
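The shape of the fix, reduced to a standalone sketch. The lock and teardown calls appear as comments because they are stand-ins for the spin_lock_irq/flush_work/dc_stream_forward_crc_window calls in the real amdgpu_dm_set_crc_window_default() below; the struct is simplified.

/* Illustrative model only: capture the old state while resetting
 * under the lock, then do the heavy teardown only when secure
 * display was actually armed. */
#include <stdbool.h>
#include <stdio.h>

struct window_param {
	bool activated;
	int x_start, y_start, x_end, y_end;
};

static void reset_crc_window(struct window_param *w)
{
	bool was_activated;

	/* spin_lock_irq(&drm_dev->event_lock); */
	was_activated = w->activated;
	w->activated = false;
	w->x_start = w->y_start = w->x_end = w->y_end = 0;
	/* spin_unlock_irq(&drm_dev->event_lock); */

	/* Teardown only if secure display was enabled beforehand. */
	if (was_activated)
		printf("was active: flush ROI work, clear CRC window\n");
}

int main(void)
{
	struct window_param w = { .activated = true };

	reset_crc_window(&w);	/* tears down */
	reset_crc_window(&w);	/* no-op: nothing was enabled */
	return 0;
}

Remembering was_activated under the lock and acting on it afterwards is what keeps the disable command from being sent when secure display was never turned on.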
Reviewed-by: Wayne Lin Acked-by: Qingqing Zhuo Signed-off-by: Alan Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 31 ++++++++----------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 27711743c22c..0802f8e8fac5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -83,12 +83,15 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, } #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY -static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc) +static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream) { struct drm_device *drm_dev = crtc->dev; + struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + bool was_activated; spin_lock_irq(&drm_dev->event_lock); + was_activated = acrtc->dm_irq_params.window_param.activated; acrtc->dm_irq_params.window_param.x_start = 0; acrtc->dm_irq_params.window_param.y_start = 0; acrtc->dm_irq_params.window_param.x_end = 0; @@ -97,6 +100,14 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc) acrtc->dm_irq_params.window_param.update_win = false; acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; spin_unlock_irq(&drm_dev->event_lock); + + /* Disable secure_display if it was enabled */ + if (was_activated) { + /* stop ROI update on this crtc */ + flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work); + flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work); + dc_stream_forward_crc_window(stream, NULL, true); + } } static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) @@ -204,9 +215,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state, enum amdgpu_dm_pipe_crc_source source) { -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - int i; -#endif struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc_stream_state *stream_state = dm_crtc_state->stream; bool enable = amdgpu_dm_is_valid_crc_source(source); @@ -220,19 +228,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, /* Enable or disable CRTC CRC generation */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - /* Disable secure_display if it was enabled */ - if (!enable) { - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->dm.secure_display_ctxs[i].crtc == crtc) { - /* stop ROI update on this crtc */ - flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); - flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); - dc_stream_forward_crc_window(stream_state, NULL, true); - } - } - } -#endif if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, NULL, enable, enable)) { ret = -EINVAL; @@ -363,7 +358,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) /* Reset secure_display when we change crc source from debugfs */ - amdgpu_dm_set_crc_window_default(crtc); + amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream); #endif if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) { From 72529b683c6d94fa87f3a42efd9b92ccfb8def41 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Fri, 24 Mar 2023 10:42:37 -0400 
Subject: [PATCH 011/861] drm/amd/display: Fix hang when skipping modeset [Why&How] When skipping full modeset since the only state change was a front porch change, the DC commit sequence requires extra checks to handle nonexistent plane states being asked to be removed from context. Reviewed-by: Alvin Lee Acked-by: Qingqing Zhuo Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 ++++- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6cacb76f389e..c432436cd66d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7972,6 +7972,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, continue; dc_plane = dm_new_plane_state->dc_state; + if (!dc_plane) + continue; bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { @@ -9620,8 +9622,9 @@ static int dm_update_plane_state(struct dc *dc, return -EINVAL; } + if (dm_old_plane_state->dc_state) + dc_plane_state_release(dm_old_plane_state->dc_state); - dc_plane_state_release(dm_old_plane_state->dc_state); dm_new_plane_state->dc_state = NULL; *lock_and_validation_needed = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 85d54bfb595c..117d80cb36fb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1707,6 +1707,9 @@ bool dc_remove_plane_from_context( struct dc_stream_status *stream_status = NULL; struct resource_pool *pool = dc->res_pool; + if (!plane_state) + return true; + for (i = 0; i < context->stream_count; i++) if (context->streams[i] == stream) { stream_status = &context->stream_status[i]; From c0162a05bd31129e29a23d4bd1d2321c9663d5bc Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Mon, 27 Mar 2023 09:10:48 -0400 Subject: [PATCH 012/861] drm/amd/display: fix memleak in aconnector->timing_requested [Why] When amdgpu_dm_update_connector_after_detect is called twice in succession with a valid sink, the memory allocated for aconnector->timing_requested by the first call is not freed. This causes a memory leak. [How] Allocate memory only when aconnector->timing_requested is NULL.
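A minimal model of the allocate-once pattern the hunk below introduces, with calloc standing in for kzalloc and the names simplified for the sketch:

/* Illustrative model only: allocate the timing struct only when the
 * pointer is still NULL, so a second detect with a valid sink cannot
 * orphan the first allocation. */
#include <stdio.h>
#include <stdlib.h>

struct timing_model { int h_total, v_total; };

struct connector_model {
	struct timing_model *timing_requested;
};

static void update_after_detect(struct connector_model *c)
{
	if (!c->timing_requested) {
		c->timing_requested = calloc(1, sizeof(*c->timing_requested));
		if (!c->timing_requested)
			fprintf(stderr, "failed to create requested timing\n");
	}
}

int main(void)
{
	struct connector_model c = { 0 };
	struct timing_model *first;

	update_after_detect(&c);
	first = c.timing_requested;
	update_after_detect(&c);	/* second detect: pointer reused, no leak */
	printf("allocation reused: %s\n",
	       c.timing_requested == first ? "yes" : "no");
	free(c.timing_requested);
	return 0;
}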
Reviewed-by: Qingqing Zhuo Acked-by: Qingqing Zhuo Signed-off-by: Hersen Wu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c432436cd66d..8b03c8d8f0b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3128,9 +3128,12 @@ void amdgpu_dm_update_connector_after_detect( aconnector->edid); } - aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); - if (!aconnector->timing_requested) - dm_error("%s: failed to create aconnector->requested_timing\n", __func__); + if (!aconnector->timing_requested) { + aconnector->timing_requested = + kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); + if (!aconnector->timing_requested) + dm_error("failed to create aconnector->requested_timing\n"); + } drm_connector_update_edid_property(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); From 3cfd03b79425c8c9e10d15434f0b017249372609 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 27 Mar 2023 15:33:54 -0400 Subject: [PATCH 013/861] drm/amd/display: update max streams per surface Increase to 6 as that is the maximum number of surfaces supported ASICs can have. There is no practical use case yet, but this is valuable for pre-si validation. Reviewed-by: Ariel Bernstein Acked-by: Qingqing Zhuo Signed-off-by: Dmytro Laktyushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 45ab48fe5d00..34c848311455 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -83,7 +83,7 @@ struct dc_perf_trace { unsigned long last_entry_write; }; -#define MAX_SURFACE_NUM 4 +#define MAX_SURFACE_NUM 6 #define NUM_PIXEL_FORMATS 10 enum tiling_mode { From d11dfbecc3feed5916bbe6c10942d9db61a9e2de Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Mon, 27 Mar 2023 18:40:34 -0400 Subject: [PATCH 014/861] drm/amd/display: Only consider DISPCLK when using optimized boot path [Description] - Previous bug fix for audio issue included dtbclk and p-state on the optimized boot path which is incorrect - We only care about DISPCLK in the optimized vs.
non-optimized boot path to avoid audio issues Reviewed-by: Saaem Rizvi Acked-by: Qingqing Zhuo Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 1f5ee5cde6e1..26791e3d162f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -721,6 +721,9 @@ static void dcn32_initialize_min_clocks(struct dc *dc) clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; + clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; + clocks->fclk_p_state_change_support = true; + clocks->p_state_change_support = true; if (dc->debug.disable_boot_optimizations) { clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; } else { @@ -730,9 +733,6 @@ static void dcn32_initialize_min_clocks(struct dc *dc) * freq to ensure that the timing is valid and unchanged. */ clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr); - clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; - clocks->fclk_p_state_change_support = true; - clocks->p_state_change_support = true; } dc->clk_mgr->funcs->update_clocks( From c4edb01374685a3ea195c7d31459448cabe5a34e Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Tue, 28 Mar 2023 09:53:33 -0400 Subject: [PATCH 015/861] drm/amd/display: Reduce SubVP + DRR stretch margin [Description] - Having excessively large margin causes failure in the static schedulability check in some cases for SubVP + DRR - 100us of DRR margin is sufficient based on a weeks worth of stress testing on different display configs Reviewed-by: Michael Strauss Acked-by: Qingqing Zhuo Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_stream.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 181a3408cc61..25284006019c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -144,7 +144,7 @@ struct test_pattern { unsigned int cust_pattern_size; }; -#define SUBVP_DRR_MARGIN_US 600 // 600us for DRR margin (SubVP + DRR) +#define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR) enum mall_stream_type { SUBVP_NONE, // subvp not in use From e97cc04fe0fb33e489583dff79f6b1d6919fcc66 Mon Sep 17 00:00:00 2001 From: Josip Pavic Date: Wed, 15 Feb 2023 15:47:59 -0500 Subject: [PATCH 016/861] drm/amd/display: refactor dmub commands into single function [Why & How] Consolidate dmub access to a single interface. This makes it easier to add code in the future that needs to run every time a dmub command is requested (e.g. instrumentation, locking etc). 
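A sketch of the consolidated entry point this patch introduces, reduced to a standalone program. The enum values and helper bodies here are simplified stand-ins for dm_execute_dmub_cmd() and the DM_DMUB_WAIT_TYPE_* values used in the hunks below:

/* Illustrative model only: one choke point that queues, kicks, and
 * waits according to policy. Instrumentation or locking added here
 * covers every caller at once. */
#include <stdbool.h>
#include <stdio.h>

enum wait_type {
	WAIT_TYPE_NO_WAIT,
	WAIT_TYPE_WAIT,
	WAIT_TYPE_WAIT_WITH_REPLY,
};

struct cmd_model { int type; };

static bool queue_cmd(const struct cmd_model *cmd) { printf("queue type %d\n", cmd->type); return true; }
static void execute_cmds(void)                     { printf("execute\n"); }
static void wait_idle(void)                        { printf("wait idle\n"); }
static void copy_reply(struct cmd_model *cmd)      { printf("reply for type %d\n", cmd->type); }

static bool execute_dmub_cmd(struct cmd_model *cmd, enum wait_type wait)
{
	if (!queue_cmd(cmd))
		return false;
	execute_cmds();
	if (wait != WAIT_TYPE_NO_WAIT)
		wait_idle();
	if (wait == WAIT_TYPE_WAIT_WITH_REPLY)
		copy_reply(cmd);
	return true;
}

int main(void)
{
	struct cmd_model cmd = { .type = 1 };

	return execute_dmub_cmd(&cmd, WAIT_TYPE_WAIT) ? 0 : 1;
}

With every caller funneled through one function, adding tracing or locking later becomes a one-line change instead of a sweep across every call site.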
Reviewed-by: Nicholas Kazlauskas Acked-by: Qingqing Zhuo Signed-off-by: Josip Pavic Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 +- .../drm/amd/display/dc/bios/command_table2.c | 25 +-- .../display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 4 +- .../dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 4 +- .../dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 4 +- .../dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 4 +- drivers/gpu/drm/amd/display/dc/core/dc.c | 23 +-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 164 +++++++----------- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 13 +- drivers/gpu/drm/amd/display/dc/dc_helper.c | 28 +-- .../gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 28 +-- .../drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 4 +- .../gpu/drm/amd/display/dc/dce/dmub_outbox.c | 4 +- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 28 +-- .../gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 7 +- .../drm/amd/display/dc/dcn21/dcn21_hwseq.c | 8 +- .../drm/amd/display/dc/dcn30/dcn30_hwseq.c | 16 +- .../display/dc/dcn31/dcn31_dio_link_encoder.c | 8 +- .../drm/amd/display/dc/dcn31/dcn31_hwseq.c | 10 +- .../amd/display/dc/dcn31/dcn31_panel_cntl.c | 4 +- .../drm/amd/display/dc/dcn314/dcn314_hwseq.c | 4 +- .../drm/amd/display/dc/dcn32/dcn32_hwseq.c | 12 +- drivers/gpu/drm/amd/display/dc/dm_services.h | 7 + .../drm/amd/display/dc/dm_services_types.h | 6 + .../dc/link/protocols/link_dp_capability.c | 2 +- .../display/dc/link/protocols/link_dp_dpia.c | 2 +- 26 files changed, 148 insertions(+), 283 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8b03c8d8f0b8..af3efb245610 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -10311,7 +10311,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, input->cea_total_length = total_length; memcpy(input->payload, data, length); - res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); + res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { DRM_ERROR("EDID CEA parser failed\n"); return false; @@ -10761,3 +10761,13 @@ bool check_seamless_boot_capability(struct amdgpu_device *adev) return false; } + +bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); +} + +bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); +} diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 1ef9e4053bb7..90a02d7bd3da 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -123,9 +123,7 @@ static void encoder_control_dmcub( sizeof(cmd.digx_encoder_control.header); cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result encoder_control_digx_v1_5( @@ -261,9 +259,7 @@ static void transmitter_control_dmcub( sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig = *dig; - 
dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_6( @@ -325,9 +321,7 @@ static void transmitter_control_dmcub_v1_7( sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_7( @@ -435,9 +429,7 @@ static void set_pixel_clock_dmcub( sizeof(cmd.set_pixel_clock.header); cmd.set_pixel_clock.pixel_clock.clk = *clk; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result set_pixel_clock_v7( @@ -804,9 +796,7 @@ static void enable_disp_power_gating_dmcub( sizeof(cmd.enable_disp_power_gating.header); cmd.enable_disp_power_gating.power_gating.pwr = *pwr; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_disp_power_gating_v2_1( @@ -1016,10 +1006,7 @@ static void enable_lvtma_control_dmcub( panel_instance; cmd.lvtma_control.data.bypass_panel_control_wait = bypass_panel_control_wait; - dc_dmub_srv_cmd_queue(dmcub, &cmd); - dc_dmub_srv_cmd_execute(dmcub); - dc_dmub_srv_wait_idle(dmcub); - + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_lvtma_control( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index f9e2e0c3095e..3c743cd3d3f0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -250,9 +250,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 5cb44f838bde..4d5cd59f6433 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -286,9 +286,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index b737cbc468f5..300c6a5872d0 100644 --- 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -234,9 +234,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 93db4dbee713..538126cefd4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -254,9 +254,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 52564b93f7eb..61eec5aa4067 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -515,8 +515,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, cmd.secure_display.roi_info.y_end = rect->y + rect->height; } - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } static inline void @@ -3309,7 +3308,6 @@ void dc_dmub_update_dirty_rect(struct dc *dc, struct dc_state *context) { union dmub_rb_cmd cmd; - struct dc_context *dc_ctx = dc->ctx; struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; unsigned int i, j; unsigned int panel_inst = 0; @@ -3350,8 +3348,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, update_dirty_rect->panel_inst = panel_inst; update_dirty_rect->pipe_idx = j; - dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } } } @@ -4606,7 +4603,6 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, { uint8_t action; union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; ASSERT(payload->length <= 16); @@ -4654,9 +4650,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, ); } - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -4700,7 +4694,6 @@ bool dc_process_dmub_set_config_async(struct dc *dc, struct dmub_notification *notify) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; bool is_cmd_complete = true; /* prepare SET_CONFIG command */ @@ -4711,7 +4704,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc, cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 
cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; - if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) { + if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { /* command is not processed by dmub */ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; return is_cmd_complete; @@ -4746,7 +4739,6 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, uint8_t *mst_slots_in_use) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; /* prepare MST_ALLOC_SLOTS command */ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; @@ -4755,7 +4747,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; - if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) + if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) /* command is not processed by dmub */ return DC_ERROR_UNEXPECTED; @@ -4789,14 +4781,11 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, uint32_t hpd_int_enable) { union dmub_rb_cmd cmd = {0}; - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; cmd.dpia_hpd_int_enable.enable = hpd_int_enable; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index a9b9490a532c..954cbfdbc3b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -65,47 +65,6 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) } } -void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, - union dmub_rb_cmd *cmd) -{ - struct dmub_srv *dmub = dc_dmub_srv->dmub; - struct dc_context *dc_ctx = dc_dmub_srv->ctx; - enum dmub_status status; - - status = dmub_srv_cmd_queue(dmub, cmd); - if (status == DMUB_STATUS_OK) - return; - - if (status != DMUB_STATUS_QUEUE_FULL) - goto error; - - /* Execute and wait for queue to become empty again. */ - dc_dmub_srv_cmd_execute(dc_dmub_srv); - dc_dmub_srv_wait_idle(dc_dmub_srv); - - /* Requeue the command. 
*/ - status = dmub_srv_cmd_queue(dmub, cmd); - if (status == DMUB_STATUS_OK) - return; - -error: - DC_ERROR("Error queuing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); -} - -void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) -{ - struct dmub_srv *dmub = dc_dmub_srv->dmub; - struct dc_context *dc_ctx = dc_dmub_srv->ctx; - enum dmub_status status; - - status = dmub_srv_cmd_execute(dmub); - if (status != DMUB_STATUS_OK) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); - } -} - void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub = dc_dmub_srv->dmub; @@ -159,22 +118,55 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, } } -bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd) +bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { + return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type); +} + +bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; struct dmub_srv *dmub; enum dmub_status status; + int i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; - status = dmub_srv_cmd_with_reply_data(dmub, cmd); + for (i = 0 ; i < count; i++) { + // Queue command + status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + return false; + } + } + + status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { - DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } + // Wait for DMUB to process command + if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { + status = dmub_srv_wait_for_idle(dmub, 100000); + + if (status != DMUB_STATUS_OK) { + DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + return false; + } + + // Copy data back from ring buffer into command + if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) + dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); + } + return true; } @@ -267,9 +259,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) @@ -283,9 +273,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. 
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream) @@ -378,21 +366,14 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header); // Send the command to the DMCUB. - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } -void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) +void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) { union dmub_rb_cmd cmd = { 0 }; - enum dmub_status status; - - if (!dmub) { - return; - } memset(&cmd, 0, sizeof(cmd)); @@ -402,15 +383,10 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) cmd.query_feature_caps.header.ret_status = 1; cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data); - /* Send command to fw */ - status = dmub_srv_cmd_with_reply_data(dmub, &cmd); - - ASSERT(status == DMUB_STATUS_OK); - /* If command was processed, copy feature caps to dmub srv */ - if (status == DMUB_STATUS_OK && + if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_feature_caps.header.ret_status == 0) { - memcpy(&dmub->feature_caps, + memcpy(&dc_dmub_srv->dmub->feature_caps, &cmd.query_feature_caps.query_feature_caps_data, sizeof(struct dmub_feature_caps)); } @@ -419,7 +395,6 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx) { union dmub_rb_cmd cmd = { 0 }; - enum dmub_status status; unsigned int panel_inst = 0; dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst); @@ -433,13 +408,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data); cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst; - // Send command to fw - status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd); - - ASSERT(status == DMUB_STATUS_OK); - // If command was processed, copy feature caps to dmub srv - if (status == DMUB_STATUS_OK && + if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.visual_confirm_color.header.ret_status == 0) { memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color, &cmd.visual_confirm_color.visual_confirm_color_data, @@ -797,9 +767,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? 
wm_val_refclk : 0xFFFF; } - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) @@ -982,14 +951,6 @@ static void dc_build_cursor_update_payload0( payload->panel_inst = panel_inst; } -static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv, - union dmub_rb_cmd *cmd) -{ - dc_dmub_srv_cmd_queue(dmub_srv, cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); -} - static void dc_build_cursor_position_update_payload0( struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx, const struct hubp *hubp, const struct dpp *dpp) @@ -1032,9 +993,11 @@ static void dc_build_cursor_attribute_update_payload1( void dc_send_update_cursor_info_to_dmu( struct pipe_ctx *pCtx, uint8_t pipe_idx) { - union dmub_rb_cmd cmd = { 0 }; - union dmub_cmd_update_cursor_info_data *update_cursor_info = - &cmd.update_cursor_info.update_cursor_info_data; + union dmub_rb_cmd cmd[2]; + union dmub_cmd_update_cursor_info_data *update_cursor_info_0 = + &cmd[0].update_cursor_info.update_cursor_info_data; + + memset(cmd, 0, sizeof(cmd)); if (!dc_dmub_should_update_cursor_data(pCtx)) return; @@ -1051,31 +1014,28 @@ void dc_send_update_cursor_info_to_dmu( { /* Build Payload#0 Header */ - cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; - cmd.update_cursor_info.header.payload_bytes = - sizeof(cmd.update_cursor_info.update_cursor_info_data); - cmd.update_cursor_info.header.multi_cmd_pending = 1; /* To combine multi dmu cmd, 1st cmd */ + cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; + cmd[0].update_cursor_info.header.payload_bytes = + sizeof(cmd[0].update_cursor_info.update_cursor_info_data); + cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd /* Prepare Payload */ - dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0); + dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0); - dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx, + dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp); - /* Send update_curosr_info to queue */ - dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd); - } + } { /* Build Payload#1 Header */ - memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data)); - cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; - cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); - cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */ + cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; + cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); + cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command. 
dc_build_cursor_attribute_update_payload1( - &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, + &cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp); /* Combine 2nd cmds update_curosr_info to DMU */ - dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd); + dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index d34f5563df2e..22f7b2704c8e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -26,7 +26,7 @@ #ifndef _DMUB_DC_SRV_H_ #define _DMUB_DC_SRV_H_ -#include "os_types.h" +#include "dm_services_types.h" #include "dmub/dmub_srv.h" struct dmub_srv; @@ -52,16 +52,13 @@ struct dc_dmub_srv { void *dm; }; -void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, - union dmub_rb_cmd *cmd); - -void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv); - void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); -bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd); +bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); + +bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type); bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, unsigned int stream_mask); @@ -77,7 +74,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst); bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context); -void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub); +void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx); void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index f43cce16bb6c..a21948267c0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -41,19 +41,13 @@ static inline void submit_dmub_read_modify_write( const struct dc_context *ctx) { struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; - bool gather = false; offload->should_burst_write = (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); cmd_buf->header.payload_bytes = sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); @@ -66,17 +60,11 @@ static inline void submit_dmub_burst_write( const struct dc_context *ctx) { struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; - bool gather = false; cmd_buf->header.payload_bytes = sizeof(uint32_t) * 
offload->reg_seq_count; - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); @@ -88,17 +76,11 @@ static inline void submit_dmub_reg_wait( const struct dc_context *ctx) { struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; - bool gather = false; - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; - - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); offload->reg_seq_count = 0; - - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; } struct dc_reg_value_masks { @@ -151,7 +133,6 @@ static void dmub_flush_buffer_execute( const struct dc_context *ctx) { submit_dmub_read_modify_write(offload, ctx); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } static void dmub_flush_burst_write_buffer_execute( @@ -159,7 +140,6 @@ static void dmub_flush_burst_write_buffer_execute( const struct dc_context *ctx) { submit_dmub_burst_write(offload, ctx); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, @@ -691,8 +671,6 @@ void reg_sequence_start_execute(const struct dc_context *ctx) default: return; } - - dc_dmub_srv_cmd_execute(ctx->dmub_srv); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 4055d271ac57..e152c68edfd1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -75,9 +75,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_abm_init(struct abm *abm, uint32_t backlight) @@ -156,9 +154,7 @@ bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask) cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -180,9 +176,7 @@ void dmub_abm_set_ambient_level(struct abm *abm, unsigned int ambient_lux, uint8 cmd.abm_set_ambient_level.abm_set_ambient_level_data.panel_mask = panel_mask; cmd.abm_set_ambient_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_ambient_level_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } #endif @@ -212,9 +206,7 @@ void dmub_abm_init_config(struct abm *abm, cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - 
dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -231,9 +223,7 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -253,9 +243,7 @@ bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -277,9 +265,7 @@ bool dmub_abm_set_backlight_level(struct abm *abm, cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 3f32e9c3fbaf..2aa0e01a6891 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -47,9 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, if (!lock) cmd.lock_hw.lock_hw_data.should_release = 1; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c index fff1d07d865d..d8009b2dc56a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c @@ -48,7 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv) sizeof(cmd.outbox1_enable.header); cmd.outbox1_enable.enable = true; - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dmub_srv); - dc_dmub_srv_wait_idle(dmub_srv); + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 9705d8f88382..4000a834592c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -168,9 +168,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state * cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst; cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -198,9 +196,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8 
cmd.psr_enable.header.payload_bytes = 0; // Send header only - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Below loops 1000 x 500us = 500 ms. * Exit PSR may need to wait 1-2 frames to power up. Timeout after at @@ -248,9 +244,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_ cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -269,9 +263,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub, cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle; cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -290,9 +282,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -422,9 +412,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->relock_delay_frame_cnt = 2; copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -445,9 +433,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst) cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC; cmd.psr_enable.header.payload_bytes = 0; - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 58e459c7e7d3..f976fac8dc3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -667,7 +667,6 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip static void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) { - struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); union dmub_rb_cmd cmd; @@ -690,11 +689,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp, cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid; PERF_TRACE(); // TODO: remove after performance is stable. - dc_dmub_srv_cmd_queue(dmcub, &cmd); - PERF_TRACE(); // TODO: remove after performance is stable. - dc_dmub_srv_cmd_execute(dmcub); - PERF_TRACE(); // TODO: remove after performance is stable. 
- dc_dmub_srv_wait_idle(dmcub); + dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); PERF_TRACE(); // TODO: remove after performance is stable. } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 1c6477d73c8e..55a464a39529 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -152,9 +152,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -173,9 +171,7 @@ static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->dmub_srv); - dc_dmub_srv_wait_idle(dc->dmub_srv); + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 8263a07f265f..3303c9aae068 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -632,7 +632,7 @@ void dcn30_init_hw(struct dc *dc) dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); // Get DMCUB capabilities - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } @@ -736,8 +736,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ; cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -859,9 +858,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.cursor_height = cursor_attr.height; cmd.mall.cursor_pitch = cursor_attr.pitch; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Use copied cursor, and it's okay to not switch back */ cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part; @@ -877,8 +874,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.tmr_scale = tmr_scale; cmd.mall.debug_bits = dc->debug.mall_error_as_fatal; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -895,9 +891,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, 
&cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 745a5d187a98..bd62502380d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -117,7 +117,6 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc, union dmub_rb_cmd *cmd) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; memset(cmd, 0, sizeof(*cmd)); cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; @@ -126,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc, cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd)) + if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return false; return true; @@ -425,7 +424,6 @@ static bool link_dpia_control(struct dc_context *dc_ctx, struct dmub_cmd_dig_dpia_control_data *dpia_control) { union dmub_rb_cmd cmd; - struct dc_dmub_srv *dmub = dc_ctx->dmub_srv; memset(&cmd, 0, sizeof(cmd)); @@ -438,9 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx, cmd.dig1_dpia_control.dpia_control = *dpia_control; - dc_dmub_srv_cmd_queue(dmub, &cmd); - dc_dmub_srv_cmd_execute(dmub); - dc_dmub_srv_wait_idle(dmub); + dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 62ce36c75c4d..e0c74868d2ee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -297,7 +297,7 @@ void dcn31_init_hw(struct dc *dc) #endif // Get DMCUB capabilities - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; } @@ -442,9 +442,7 @@ void dcn31_z10_save_init(struct dc *dc) cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_z10_restore(const struct dc *dc) @@ -462,9 +460,7 @@ void dcn31_z10_restore(const struct dc *dc) cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 11ea9d13e312..217acd4e292a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -52,7 +52,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub 
cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data); cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst; - return dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd); + return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); } static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) @@ -85,7 +85,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; cmd.panel_cntl.data.bl_pwm_ref_div2 = panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2; - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) + if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return 0; panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 40c488b26901..6fb3f64e3057 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -417,9 +417,7 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool cmd.domain_control.data.inst = hubp_inst; cmd.domain_control.data.power_gate = !power_on; - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(ctx->dmub_srv); - dc_dmub_srv_wait_idle(ctx->dmub_srv); + dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); PERF_TRACE(); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 26791e3d162f..9ce11ed769a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -274,8 +274,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -309,8 +308,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); cmd.cab.cab_alloc_ways = ways; - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -326,9 +324,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -946,7 +942,7 @@ void dcn32_init_hw(struct dc *dc) // Get DMCUB capabilities if (dc->ctx->dmub_srv) { - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 9a3f2a44f882..d33d595405a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ 
b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -40,6 +40,7 @@ struct dmub_srv; struct dc_dmub_srv; +union dmub_rb_cmd; irq_handler_idx dm_register_interrupt( struct dc_context *ctx, @@ -273,6 +274,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc #define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__, CTX) #define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX) +/* + * DMUB Interfaces + */ +bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); +bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); + /* * Debug and verification hooks */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index b52ba6ffabe1..facf269c4326 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -269,4 +269,10 @@ struct dtn_min_clk_info { uint32_t min_memory_clock_khz; }; +enum dm_dmub_wait_type { + DM_DMUB_WAIT_TYPE_NO_WAIT, + DM_DMUB_WAIT_TYPE_WAIT, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY, +}; + #endif
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 2914fca7dab3..5ff5d4e64902 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1394,7 +1394,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( link->dc, link->link_enc->transmitter); - if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) && + if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.cable_id.header.ret_status == 1) { cable_id->raw = cmd.cable_id.data.output_raw; DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 4626fabc0a96..0bb749133909 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -90,7 +90,7 @@ bool dpia_query_hpd_status(struct dc_link *link) cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; /* Return HPD status reported by DMUB if query successfully executed. */ - if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) + if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) is_hpd_high = cmd.query_hpd.data.result; DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
From 522b9a5d5852f99e51fbc460054dc8af3b4b5b30 Mon Sep 17 00:00:00 2001 From: Josip Pavic Date: Fri, 24 Mar 2023 15:31:10 -0400 Subject: [PATCH 017/861] drm/amd/display: drain dmub inbox if queue is full [Why & How] If dmub command queuing fails due to the inbox being full, flush the inbox and resubmit the command. This was previously the default behavior but was lost in a refactor.
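The restored behavior, condensed from the dc_dmub_srv_cmd_run_list() hunk in the diff below (error handling of the final status is unchanged and not repeated here):

status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
if (status == DMUB_STATUS_QUEUE_FULL) {
	/* Inbox is full: execute what is already queued and
	 * wait for it to drain... */
	dmub_srv_cmd_execute(dmub);
	dmub_srv_wait_for_idle(dmub, 100000);

	/* ...then requeue the command that did not fit. */
	status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
}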
Reviewed-by: Nicholas Kazlauskas Acked-by: Qingqing Zhuo Signed-off-by: Josip Pavic Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 9 +++++++++ 1 file changed, 9 insertions(+)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 954cbfdbc3b6..eef43577508c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -139,6 +139,15 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun // Queue command status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + if (status == DMUB_STATUS_QUEUE_FULL) { + /* Execute and wait for queue to become empty again. */ + dmub_srv_cmd_execute(dmub); + dmub_srv_wait_for_idle(dmub, 100000); + + /* Requeue the command. */ + status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + } + if (status != DMUB_STATUS_OK) { DC_ERROR("Error queueing DMUB command: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
From cdff36a0217aadf5cbc167893ad1c0da869619cb Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Tue, 28 Mar 2023 10:45:24 -0400 Subject: [PATCH 018/861] drm/amd/display: fix access hdcp_workqueue assert [Why] HDCP is enabled for ASICs from Raven onward. For older ASICs where HDCP is not enabled, hdcp_workqueue is NULL, and some accesses to the HDCP work queue are not guarded with a pointer check. [How] Add an hdcp_workqueue pointer check before accessing the work queue. Reviewed-by: Bhawanpreet Lakha Acked-by: Qingqing Zhuo Signed-off-by: Hersen Wu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++ .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 16 ++++++++++------ 2 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index af3efb245610..6e9d2e680d50 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8559,6 +8559,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + if (!adev->dm.hdcp_workqueue) + continue; + pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); if (!connector) @@ -8607,6 +8610,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + if (!adev->dm.hdcp_workqueue) + continue; + new_crtc_state = NULL; old_crtc_state = NULL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 994ba426ca66..5dc79b753d5f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -379,13 +379,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink && connector->state) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); - struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; - struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index]; - connector->state->hdcp_content_type = - hdcp_w->hdcp_content_type[connector->index]; -
connector->state->content_protection = - hdcp_w->content_protection[connector->index]; + if (adev->dm.hdcp_workqueue) { + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; + struct hdcp_workqueue *hdcp_w = + &hdcp_work[aconnector->dc_link->link_index]; + + connector->state->hdcp_content_type = + hdcp_w->hdcp_content_type[connector->index]; + connector->state->content_protection = + hdcp_w->content_protection[connector->index]; + } } if (aconnector->dc_sink) { From e3416e872f84086667df21daf166506fab97358d Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 24 Mar 2023 14:29:52 -0600 Subject: [PATCH 019/861] drm/amd/display: Add FAMS validation before trying to use it To ensure that FAMS can be used, DC must check if there is VRR support. This commit adds the required configuration to ensure FAMS can be executed in the target system. Reviewed-by: Alvin Lee Acked-by: Qingqing Zhuo Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 + drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 7 ++++++- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 2 +- 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 61eec5aa4067..105f705bd91a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2604,6 +2604,12 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->mst_bw_update) su_flags->bits.mst_bw = 1; + + if (stream_update->stream && stream_update->stream->freesync_on_desktop && + (stream_update->vrr_infopacket || stream_update->allow_freesync || + stream_update->vrr_active_variable)) + su_flags->bits.fams_changed = 1; + if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) su_flags->bits.crtc_timing_adjust = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 25284006019c..270282fbda4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -131,6 +131,7 @@ union stream_update_flags { uint32_t dsc_changed : 1; uint32_t mst_bw : 1; uint32_t crtc_timing_adjust : 1; + uint32_t fams_changed : 1; } bits; uint32_t raw; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index c95f000b63b2..34b08d90dc1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -301,7 +301,12 @@ static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *o void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) { - optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); + struct dc *dc = optc->ctx->dc; + + if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) + dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max); + else + optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); } void optc3_tg_init(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 598fa1de54ce..1c55d3b01f53 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -360,7 +360,7 @@ union dmub_fw_boot_status { uint32_t optimized_init_done : 1; /**< 1 
if optimized init done */
 uint32_t restore_required : 1; /**< 1 if driver should call restore */
 uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
- uint32_t reserved : 1;
+ uint32_t fams_enabled : 1; /**< 1 if FAMS is enabled */
 uint32_t detection_required: 1; /**< if detection need to be triggered by driver */
 uint32_t hw_power_init_done: 1; /**< 1 if hw power init is completed */
 } bits; /**< status bits */

From 4d5f872dbc755114628c236e17421629ec522203 Mon Sep 17 00:00:00 2001
From: Iswara Nagulendran
Date: Thu, 16 Mar 2023 16:29:06 -0400
Subject: [PATCH 020/861] drm/amd/display: Adding support for VESA SCR

[HOW&WHY]
Write DPCD 721 bit 7 to high, and the appropriate luminance level to
DPCD 734-736, if bit 4 from DPCD register 734 is high, indicating that
panel luminance control is enabled from the panel side.

Reviewed-by: Anthony Koo
Acked-by: Qingqing Zhuo
Signed-off-by: Iswara Nagulendran
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/dc_dp_types.h |  7 +++++
 .../dc/link/protocols/link_dp_capability.c   |  9 +++++-
 .../link/protocols/link_edp_panel_control.c  | 29 ++++++++++++++++---
 include/drm/display/drm_dp.h                 |  3 ++
 4 files changed, 43 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 49aab1924665..4a7f6497dc5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -566,6 +566,12 @@ struct dpcd_amd_device_id {
 	uint8_t dal_version_byte2;
 };
 
+struct target_luminance_value {
+	uint8_t byte0;
+	uint8_t byte1;
+	uint8_t byte2;
+};
+
 struct dpcd_source_backlight_set {
 	struct {
 		uint8_t byte0;
@@ -1225,6 +1231,7 @@ struct dpcd_caps {
 	union dp_main_line_channel_coding_cap channel_coding_cap;
 	union dp_sink_video_fallback_formats fallback_formats;
 	union dp_fec_capability1 fec_cap1;
+	bool panel_luminance_control;
 	union dp_cable_id cable_id;
 	uint8_t edp_rev;
 	union edp_alpm_caps alpm_caps;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 5ff5d4e64902..84265dc66bba 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1450,7 +1450,8 @@ bool read_is_mst_supported(struct dc_link *link)
  */
 static bool dpcd_read_sink_ext_caps(struct dc_link *link)
 {
-	uint8_t dpcd_data;
+	uint8_t dpcd_data = 0;
+	uint8_t edp_general_cap2 = 0;
 
 	if (!link)
 		return false;
@@ -1459,6 +1460,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
 		return false;
 
 	link->dpcd_sink_ext_caps.raw = dpcd_data;
+
+	if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK)
+		return false;
+
+	link->dpcd_caps.panel_luminance_control = (edp_general_cap2 & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) != 0;
+
 	return true;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index d895046787bc..5ab2de12ccf8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -164,14 +164,35 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
 	*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms =
 		(uint16_t)transition_time_in_ms;
 
-	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
-		(uint8_t *)(&dpcd_backlight_set),
-		sizeof(dpcd_backlight_set)) != DC_OK)
-		return false;
+	if (!link->dpcd_caps.panel_luminance_control) {
+		if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+			(uint8_t *)(&dpcd_backlight_set),
+			sizeof(dpcd_backlight_set)) != DC_OK)
+			return false;
 
-	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
-		&backlight_control, 1) != DC_OK)
-		return false;
+		if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+			&backlight_control, 1) != DC_OK)
+			return false;
+	} else {
+		const uint8_t backlight_enable = DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+		struct target_luminance_value *target_luminance = NULL;
+
+		//if target luminance value is greater than 24 bits, clip the value to 24 bits
+		if (backlight_millinits > 0xFFFFFF)
+			backlight_millinits = 0xFFFFFF;
+
+		target_luminance = (struct target_luminance_value *)&backlight_millinits;
+
+		if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+			&backlight_enable,
+			sizeof(backlight_enable)) != DC_OK)
+			return false;
+
+		if (core_link_write_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+			(uint8_t *)(target_luminance),
+			sizeof(struct target_luminance_value)) != DC_OK)
+			return false;
+	}
 
 	return true;
 }
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 358db4a9f167..d735073fdd81 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -983,6 +983,7 @@
 #define DP_EDP_GENERAL_CAP_2		    0x703
 # define DP_EDP_OVERDRIVE_ENGINE_ENABLED	(1 << 0)
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE	(1 << 4)
 
 #define DP_EDP_GENERAL_CAP_3		    0x704    /* eDP 1.4 */
 # define DP_EDP_X_REGION_CAP_MASK		(0xf << 0)
@@ -1008,6 +1009,7 @@
 # define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE	(1 << 4)
 # define DP_EDP_REGIONAL_BACKLIGHT_ENABLE	(1 << 5)
 # define DP_EDP_UPDATE_REGION_BRIGHTNESS	(1 << 6) /* eDP 1.4 */
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE	(1 << 7)
 
 #define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB	    0x722
 #define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB	    0x723
@@ -1032,6 +1034,7 @@
 
 #define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET   0x732
 #define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET   0x733
+#define DP_EDP_PANEL_TARGET_LUMINANCE_VALUE 0x734
 
 #define DP_EDP_REGIONAL_BACKLIGHT_BASE      0x740    /* eDP 1.4 */
 #define DP_EDP_REGIONAL_BACKLIGHT_0	    0x741    /* eDP 1.4 */

From 6bfe9a23a8d6c7292d520747859a515fd429518d Mon Sep 17 00:00:00 2001
From: Nasir Osman
Date: Thu, 30 Mar 2023 15:08:42 -0400
Subject: [PATCH 021/861] drm/amd/display: DSC policy override when ODM combine
 is forced

[why]
When we force ODM combine with DSC, we lose several 8 bit and 10 bit
modes in validation and thus are not able to use HDR. This is because
the number of horizontal slices used by DSC is currently not accounted
for properly when 2:1 ODM combine is forced.

[how]
Enforce that at least two horizontal slices are used for DSC when ODM
combine is forced.
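For context on the [how] above, the constraint can be sketched in a few
lines of C (the helper name and shape here are illustrative assumptions,
not code from the patch): with 2:1 ODM combine, each half of the frame is
compressed by its own DSC instance, so the horizontal slice count must
supply at least one slice per ODM segment.

/* Illustrative sketch only: one horizontal DSC slice per ODM segment
 * is the floor, hence forcing 2:1 ODM requires num_slices_h >= 2. */
static int min_dsc_hslices_for_odm(int odm_segment_count)
{
	return odm_segment_count > 1 ? odm_segment_count : 1;
}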
Reviewed-by: Nicholas Kazlauskas Acked-by: Qingqing Zhuo Signed-off-by: Nasir Osman Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dsc.h | 1 + drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 0e92a322c2ed..9491b76d61f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -58,6 +58,7 @@ struct dc_dsc_config_options { uint32_t dsc_min_slice_height_override; uint32_t max_target_bpp_limit_override_x16; uint32_t slice_height_granularity; + uint32_t dsc_force_odm_hslice_override; }; bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 2bdc47615543..b9a05bb025db 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -700,7 +700,7 @@ static int inc_num_slices(union dsc_enc_slice_caps slice_caps, int num_slices) } } - if (new_num_slices == num_slices) // No biger number of slices found + if (new_num_slices == num_slices) // No bigger number of slices found new_num_slices++; return new_num_slices; @@ -952,6 +952,13 @@ static bool setup_dsc_config( else is_dsc_possible = false; } + // When we force 2:1 ODM, we can't have 1 slice to divide amongst 2 separate DSC instances + // need to enforce at minimum 2 horizontal slices + if (options->dsc_force_odm_hslice_override) { + num_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, 2); + if (num_slices_h == 0) + is_dsc_possible = false; + } if (!is_dsc_possible) goto done; @@ -1163,6 +1170,7 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable) void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options) { options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override; + options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine; options->max_target_bpp_limit_override_x16 = 0; options->slice_height_granularity = 1; } From 7052a801d6bc8cd203e1708313e4996630208a6e Mon Sep 17 00:00:00 2001 From: Michael Mityushkin Date: Thu, 30 Mar 2023 12:46:58 -0400 Subject: [PATCH 022/861] drm/amd/display: Correct output color space during HW reinitialize [Why] Doing core_link_disable_stream or set_dpms_off when reinitializing hardware causes issue to repro with external display connected. This is unnecessary, blanking pixel data should be sufficient. [How] Call disable_pixel_data while reinitializing hardware instead of core_link_disable_stream or set_dpms_off. Reviewed-by: Nicholas Kazlauskas Acked-by: Qingqing Zhuo Signed-off-by: Michael Mityushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 4 ++++ drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 1 + drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 1 + 3 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 422fbf79da64..5800acf6aae1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -313,6 +313,10 @@ void dcn20_init_blank( } opp = dc->res_pool->opps[opp_id_src0]; + /* don't override the blank pattern if already enabled with the correct one. 
*/ + if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp)) + return; + if (num_opps == 2) { otg_active_width = otg_active_width / 2; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 3a32810bbe38..8598ea233ef3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -58,6 +58,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 5267e901a35c..ce53339b2e10 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -60,6 +60,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, From 9fc6e4b36f2a748c853512d5ce4c8c4b98941c75 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 31 Mar 2023 17:38:21 -0400 Subject: [PATCH 023/861] drm/amd/display: Set watermarks set D equal to A [Description] - Since we do not use optimized watermark settings for MALL, set D = A - PMFW uses Set D for d0i3.1, so driver should make D = A for the time being - If we choose to optimize in the future we can set watermarks D correctly Reviewed-by: Jun Lei Acked-by: Qingqing Zhuo Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 23a972f2885f..2f7723053042 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2062,6 +2062,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr) */ + /* if (dcn3_2_soc.num_states > 2) { vlevel_temp = 0; dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; @@ -2088,6 +2089,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + */ /* Set C, for Dummy P-State: * All clocks min. 
@@ -2189,6 +2191,9 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; } + /* Make set D = set A since we do not optimized watermarks for MALL */ + context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; From d062de7b68f27546a45c063b046b66c0a73633db Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 31 Mar 2023 20:02:57 -0400 Subject: [PATCH 024/861] drm/amd/display: Enable FPO + Vactive [Description] - Enable FPO + Vactive Reviewed-by: George Shen Acked-by: Qingqing Zhuo Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index a876e6eb6cd8..4f8286ae699b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -726,7 +726,7 @@ static const struct dc_debug_options debug_defaults_drv = { .override_dispclk_programming = true, .disable_fpo_optimizations = false, .fpo_vactive_margin_us = 2000, // 2000us - .disable_fpo_vactive = true, + .disable_fpo_vactive = false, .disable_boot_optimizations = false, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index e5ab7f3077c4..cf21b240fc55 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -725,7 +725,7 @@ static const struct dc_debug_options debug_defaults_drv = { .override_dispclk_programming = true, .disable_fpo_optimizations = false, .fpo_vactive_margin_us = 2000, // 2000us - .disable_fpo_vactive = true, + .disable_fpo_vactive = false, .disable_boot_optimizations = false, }; From 276641775848020c6e84166d1bc885e028a04680 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 1 Apr 2023 11:45:31 -0400 Subject: [PATCH 025/861] drm/amd/display: [FW Promotion] Release 0.0.162.0 - Add DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command - Remove d3 entry event and instead check for stream mask - dmu: Enable timeout recovery and detection for p-state Acked-by: Qingqing Zhuo Signed-off-by: Anthony Koo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 1c55d3b01f53..54b7786f5681 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -419,7 +419,8 @@ union dmub_fw_boot_options { uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */ uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */ uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/ - uint32_t reserved : 14; /**< reserved */ + uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ + uint32_t reserved : 13; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 
32-bit access to bits */ }; @@ -1125,8 +1126,6 @@ struct dmub_rb_cmd_idle_opt_dcn_restore { */ struct dmub_dcn_notify_idle_cntl_data { uint8_t driver_idle; - uint8_t d3_entry; - uint8_t trigger; uint8_t pad[1]; }; @@ -3550,6 +3549,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command. */ struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable; + /** + * Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command. + */ + struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle; }; /** From 21fc0ff38f571debdba6aaff944addb50f49a7f7 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Mon, 3 Apr 2023 17:45:41 +0800 Subject: [PATCH 026/861] drm/amd/display: fix a divided-by-zero error [Why & How] timing.dsc_cfg.num_slices_v can be zero and it is necessary to check before using it. This fixes the error "divide error: 0000 [#1] PREEMPT SMP NOPTI". Reviewed-by: Aurabindo Pillai Acked-by: Qingqing Zhuo Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 0d3a983cb9ec..51e76bce92ea 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -927,6 +927,10 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + + if (stream->timing.dsc_cfg.num_slices_v == 0) + return false; + slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v; config->dsc_slice_height = slice_height; From ec341e0f4a02040ee8d6ef156f8bf02c5aa5c511 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 20 Mar 2023 16:47:21 -0400 Subject: [PATCH 027/861] drm/amd/display: add extra dc odm debug options [Why & How] Add options for dc odm debug. Reviewed-by: Ariel Bernstein Acked-by: Qingqing Zhuo Signed-off-by: Dmytro Laktyushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 ++ drivers/gpu/drm/amd/display/dc/dc_stream.h | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 23ee63b98dcd..3595149deceb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -879,6 +879,8 @@ struct dc_debug_options { uint32_t fpo_vactive_margin_us; bool disable_fpo_vactive; bool disable_boot_optimizations; + bool override_odm_optimization; + bool minimize_dispclk_using_odm; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 270282fbda4a..0add5ecc895f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -172,6 +172,10 @@ struct mall_temp_config { bool is_phantom_plane[MAX_PIPES]; }; +struct dc_stream_debug_options { + char force_odm_combine_segments; +}; + struct dc_stream_state { // sink is deprecated, new code should not reference // this pointer @@ -182,6 +186,7 @@ struct dc_stream_state { * a stream via the volatile dc_state rather than the static dc_link. 
*/ struct link_encoder *link_enc; + struct dc_stream_debug_options debug; struct dc_panel_patch sink_patches; union display_content_support content_support; struct dc_crtc_timing timing; From f0c5f9ebbc4cfd6b9611b8a5d9fb208c5f60e3e6 Mon Sep 17 00:00:00 2001 From: Michael Mityushkin Date: Thu, 30 Mar 2023 11:35:08 -0400 Subject: [PATCH 028/861] drm/amd/display: Apply correct panel mode when reinitializing hardware [Why] When link training during engine recovery, ASSR might fail causing panel mode to be reset to default. This should not happen for eDP as it will prevent the panel from turning back on. [How] Added dp_panel_mode to struct dc_link to remember previously applied panel mode. Do not reset panel mode to default while performing link training if previously used panel mode = eDP. Reviewed-by: Nicholas Kazlauskas Acked-by: Qingqing Zhuo Signed-off-by: Michael Mityushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 5 ++++- .../amd/display/dc/link/protocols/link_edp_panel_control.c | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3595149deceb..3b53f36029d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1456,6 +1456,7 @@ struct dc_link { struct ddc_service *ddc; + enum dp_panel_mode panel_mode; bool aux_mode; /* Private to DC core */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 170f33835930..579fa222810d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1596,7 +1596,10 @@ bool perform_link_training_with_retries( * Report and continue with eDP panel mode to * perform eDP link training with right settings */ - cp_psp->funcs.enable_assr(cp_psp->handle, link); + bool result; + result = cp_psp->funcs.enable_assr(cp_psp->handle, link); + if (!result && link->panel_mode != DP_PANEL_MODE_EDP) + panel_mode = DP_PANEL_MODE_DEFAULT; } } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 5ab2de12ccf8..2039a345f23a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -83,6 +83,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) ASSERT(result == DC_OK); } } + link->panel_mode = panel_mode; DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " "eDP panel mode enabled: %d \n", link->link_index, From a6c0c9f56197fcb3418be82a7d9f9952be1b5598 Mon Sep 17 00:00:00 2001 From: Jingwen Zhu Date: Thu, 30 Mar 2023 16:38:59 +0800 Subject: [PATCH 029/861] drm/amd/display: Improvement for handling edp link training fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why] The eDP retrain will cause the DPCD 300 to be reset to default. And cause the brightness can't be set correctly. [How] delete the call to edp panel power control in both enable_link_output/disable_link_output entirely and only call edp panel control in enable_link_dp and  disable_link_dp once. 
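For reference, DPCD address 0x300 cited in the [Why] above is the start of
the source-device-specific field (definition from
include/drm/display/drm_dp.h); the DP_SOURCE_BACKLIGHT_* registers used by
edp_set_backlight_level_nits() sit in that same field, which is presumably
why losing it across a retrain breaks brightness control:

#define DP_SOURCE_OUI 0x300 /* source IEEE OUI, 3 bytes; the vendor
                             * backlight interface is keyed on this
                             * being programmed */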
Reviewed-by: Charlene Liu
Acked-by: Qingqing Zhuo
Signed-off-by: Jingwen Zhu
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 .../display/dc/dce110/dce110_hw_sequencer.c | 19 +++++++++++--------
 .../gpu/drm/amd/display/dc/link/link_dpms.c |  5 +++++
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 9fe0ce91db00..8d2460d06bce 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -3031,10 +3031,12 @@ void dce110_enable_dp_link_output(
 	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
 	unsigned int i;
 
-
+	/*
+	 * Add the logic to extract BOTH power up and power down sequences
+	 * from enable/disable link output and only call edp panel control
+	 * in enable_link_dp and disable_link_dp once.
+	 */
 	if (link->connector_signal == SIGNAL_TYPE_EDP) {
-		if (!link->dc->config.edp_no_power_sequencing)
-			link->dc->hwss.edp_power_control(link, true);
 		link->dc->hwss.edp_wait_for_hpd_ready(link, true);
 	}
 
@@ -3096,11 +3098,12 @@ void dce110_disable_link_output(struct dc_link *link,
 
 	link_hwss->disable_link_output(link, link_res, signal);
 	link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
-	if (signal == SIGNAL_TYPE_EDP &&
-			link->dc->hwss.edp_backlight_control)
-		link->dc->hwss.edp_power_control(link, false);
-	else if (dmcu != NULL && dmcu->funcs->lock_phy)
+	/*
+	 * Add the logic to extract BOTH power up and power down sequences
+	 * from enable/disable link output and only call edp panel control
+	 * in enable_link_dp and disable_link_dp once.
+	 */
+	if (dmcu != NULL && dmcu->funcs->lock_phy)
 		dmcu->funcs->unlock_phy(dmcu);
 	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
 }
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 027ad1f0144d..2267fb097830 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -1927,6 +1927,11 @@ static void disable_link_dp(struct dc_link *link,
 
 	dp_disable_link_phy(link, link_res, signal);
 
+	if (link->connector_signal == SIGNAL_TYPE_EDP) {
+		if (!link->dc->config.edp_no_power_sequencing)
+			link->dc->hwss.edp_power_control(link, false);
+	}
+
 	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		/* set the sink to SST mode after disabling the link */
 		enable_mst_on_sink(link, false);

From 7ab269d54813e03eec8a5acc415b2aef55aaf916 Mon Sep 17 00:00:00 2001
From: Daniel Miess
Date: Tue, 4 Apr 2023 14:04:11 -0400
Subject: [PATCH 030/861] drm/amd/display: limit timing for single dimm memory

[Why]
1. It could hit the bandwidth limitation under single-dimm memory when
   connecting an 8K external monitor.
2. IsSupportedVidPn got a validation failure with 2K240Hz eDP + 8K24Hz
   external monitor.
3. It would be better to filter out such combinations in
   EnumVidPnCofuncModality.
4. For the short term, filter them out in dc bandwidth validation.

[How]
Force the 2K@240Hz+8K@24Hz timing combination to fail validation in dc.
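The magic cutoff in the dcn314 hunk that follows is simply the sum of the
two offending stream clocks; a quick check of the arithmetic (assuming
phy_pix_clk is in kHz, as the comparison implies):

/* Assumed breakdown of the threshold used below (values in kHz):
 *   2K @ 240 Hz stream: phy_pix_clk ~= 1,148,928
 *   8K @  24 Hz stream: phy_pix_clk ~=   826,260
 *   total              =  1,975,188 -> the (1148928+826260) cutoff
 * Any single-channel LPDDR config whose summed stream clocks reach
 * this total is rejected. */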
Reviewed-by: Nicholas Kazlauskas
Acked-by: Qingqing Zhuo
Signed-off-by: Daniel Miess
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 .../amd/display/dc/dcn314/dcn314_resource.c | 20 +++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 50ed7e09d5ba..24806acc8438 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -1696,6 +1696,23 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
 	*panel_config = panel_config_defaults;
 }
 
+static bool filter_modes_for_single_channel_workaround(struct dc *dc,
+		struct dc_state *context)
+{
+	// Filter out the 2K@240Hz+8K@24fps timing combination if memory is single-dimm LPDDR
+	if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
+		int total_phy_pix_clk = 0;
+
+		for (int i = 0; i < context->stream_count; i++)
+			if (context->res_ctx.pipe_ctx[i].stream)
+				total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
+
+		if (total_phy_pix_clk >= (1148928+826260)) //2K@240Hz+8K@24fps
+			return true;
+	}
+	return false;
+}
+
 bool dcn314_validate_bandwidth(struct dc *dc,
 		struct dc_state *context,
 		bool fast_validate)
@@ -1711,6 +1728,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
 
 	BW_VAL_TRACE_COUNT();
 
+	if (filter_modes_for_single_channel_workaround(dc, context))
+		goto validate_fail;
+
 	DC_FP_START();
 	// do not support self refresh only
 	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);

From bddb55ccbdc20dd7fd526c8dfb13f695637bf7b1 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin
Date: Mon, 3 Apr 2023 10:13:12 -0400
Subject: [PATCH 031/861] drm/amd/display: set dcn315 lb bpp to 48

[Why & How]
Fix a typo for dcn315 line buffer bpp.

Reviewed-by: Jun Lei
Acked-by: Qingqing Zhuo
Signed-off-by: Dmytro Laktyushkin
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index b37d14369a62..59836570603a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
 	.maximum_dsc_bits_per_component = 10,
 	.dsc422_native_support = false,
 	.is_line_buffer_bpp_fixed = true,
-	.line_buffer_fixed_bpp = 49,
+	.line_buffer_fixed_bpp = 48,
 	.line_buffer_size_bits = 789504,
 	.max_line_buffer_lines = 12,
 	.writeback_interface_buffer_size_kbytes = 90,

From 499e4b1c722e0e2ca40c56342b766e95f6c31f4a Mon Sep 17 00:00:00 2001
From: Eric Yang
Date: Fri, 29 May 2020 17:13:57 -0400
Subject: [PATCH 032/861] drm/amd/display: add mechanism to skip DCN init

[Why]
If optimized init is done in FW, DCN init can be skipped in the driver.
This needs to be communicated between driver and FW while maintaining
backwards compatibility.

[How]
Use DMUB scratch 0 bit 2 to indicate optimized init done in FW and use
DMUB scratch 4 bit 0 to indicate the driver supports the optimized flow
so FW will perform it. 
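One caveat worth noting in the dc_dmub_srv.c hunk below: the new
dc_dmub_srv_optimized_init_done() declares a local
`union dmub_fw_boot_status status` but never populates it before
returning status.bits.optimized_init_done. A complete version would first
query the boot status that the firmware reports through the scratch
registers; a minimal sketch, assuming a boot-status accessor along the
lines of the dmub_srv API (the helper name here is an assumption):

bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_fw_boot_status status = { 0 };

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	/* assumed accessor: reads the DMCUB boot-status scratch register */
	if (dmub_srv_get_fw_boot_status(dc_dmub_srv->dmub, &status) != DMUB_STATUS_OK)
		return false;

	return status.bits.optimized_init_done;
}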
Signed-off-by: Eric Yang Acked-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 -- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 28 +++++-------------- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 2 +- .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4 ++- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1 - .../gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 6 ---- .../gpu/drm/amd/display/dmub/src/dmub_dcn21.h | 4 --- .../gpu/drm/amd/display/dmub/src/dmub_srv.c | 25 +---------------- 8 files changed, 12 insertions(+), 61 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 105f705bd91a..631c6b10562e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4267,9 +4267,6 @@ void dc_set_power_state( dc_z10_restore(dc); - if (dc->ctx->dmub_srv) - dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); - dc->hwss.init_hw(dc); if (dc->hwss.init_sys_ctx != NULL && diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index eef43577508c..d15ec32243e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -179,31 +179,17 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun return true; } -void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) +bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv) { - struct dmub_srv *dmub = dc_dmub_srv->dmub; - struct dc_context *dc_ctx = dc_dmub_srv->ctx; - enum dmub_status status; + struct dmub_srv *dmub; + union dmub_fw_boot_status status; - for (;;) { - /* Wait up to a second for PHY init. */ - status = dmub_srv_wait_for_phy_init(dmub, 1000000); - if (status == DMUB_STATUS_OK) - /* Initialization OK */ - break; + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; - DC_ERROR("DMCUB PHY init failed: status=%d\n", status); - ASSERT(0); + dmub = dc_dmub_srv->dmub; - if (status != DMUB_STATUS_TIMEOUT) - /* - * Server likely initialized or we don't have - * DMCUB HW support - this won't end. - */ - break; - - /* Continue spinning so we don't hang the ASIC. 
*/ - } + return status.bits.optimized_init_done; } bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 22f7b2704c8e..a5196a9292b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -54,7 +54,7 @@ struct dc_dmub_srv { void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); -void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); +bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv); bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 1c3b6f25a782..a7ad1d7bc43e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1524,7 +1524,9 @@ void dcn10_init_hw(struct dc *dc) if (!dcb->funcs->is_accelerated_mode(dcb)) hws->funcs.disable_vga(dc->hwseq); - hws->funcs.bios_golden_init(dc); + if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv)) + hws->funcs.bios_golden_init(dc); + if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index ba1715e2d25a..719bf9bb168a 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -366,7 +366,6 @@ struct dmub_srv_hw_funcs { bool (*is_hw_init)(struct dmub_srv *dmub); - bool (*is_phy_init)(struct dmub_srv *dmub); void (*enable_dmub_boot_options)(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c index 51bb9bceb1b1..2d212bc974cc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c @@ -54,9 +54,3 @@ const struct dmub_srv_common_regs dmub_srv_dcn21_regs = { #undef DMUB_SF }; -/* Shared functions. */ - -bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub) -{ - return REG_READ(DMCUB_SCRATCH10) == 0; -} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h index 6fd5b0cd4ef3..8c4033ae4007 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h @@ -32,8 +32,4 @@ extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs; -/* Hardware functions. 
*/ - -bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub); - #endif /* _DMUB_DCN21_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 92c18bfb98b3..67c53f7e589c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -190,11 +190,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data; - if (asic == DMUB_ASIC_DCN21) { + if (asic == DMUB_ASIC_DCN21) dmub->regs = &dmub_srv_dcn21_regs; - funcs->is_phy_init = dmub_dcn21_is_phy_init; - } if (asic == DMUB_ASIC_DCN30) { dmub->regs = &dmub_srv_dcn30_regs; @@ -721,27 +719,6 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, return DMUB_STATUS_TIMEOUT; } -enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, - uint32_t timeout_us) -{ - uint32_t i = 0; - - if (!dmub->hw_init) - return DMUB_STATUS_INVALID; - - if (!dmub->hw_funcs.is_phy_init) - return DMUB_STATUS_OK; - - for (i = 0; i <= timeout_us; i += 10) { - if (dmub->hw_funcs.is_phy_init(dmub)) - return DMUB_STATUS_OK; - - udelay(10); - } - - return DMUB_STATUS_TIMEOUT; -} - enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, uint32_t timeout_us) { From c18842a2e81a3e3d5c7401f061d7887b422aeebc Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Sun, 29 May 2022 10:54:30 -0400 Subject: [PATCH 033/861] drm/amd/display: Return error code on DSC atomic check failure [Why&How] We were not returning -EINVAL on DSC atomic check fail. Add it. Fixes: 71be4b16d39a ("drm/amd/display: dsc validate fail not pass to atomic check") Reviewed-by: Aurabindo Pillai Signed-off-by: Hersen Wu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6e9d2e680d50..73b52bf76461 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -10170,6 +10170,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); if (ret) { DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 5dc79b753d5f..810ab682f424 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1410,6 +1410,7 @@ int pre_validate_dsc(struct drm_atomic_state *state, ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars); if (ret != 0) { DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; goto clean_exit; } From 08c73e896836be1a1875c612c25a9ad81893ff98 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Wed, 5 Apr 2023 16:17:42 -0400 Subject: [PATCH 034/861] drm/amd/display: remove incorrect early return [Why&How] Remove incorrect early return in a device specific fifo reset workaround Reviewed-by: Leo Li Reviewed-by: Qingqing Zhuo Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 1 - 1 file 
changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 330ab036c830..c6ce2b7123b7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -687,7 +687,6 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) return; data[0] |= (1 << 1); // set bit 1 to 1 - return; if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) return; From 2a66c0c9d20238812172693b5bef28c6c659eff4 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Thu, 17 May 2018 15:44:20 -0400 Subject: [PATCH 035/861] drm/amd/display: Write TEST_EDID_CHECKSUM_WRITE for EDID tests Extract edid's checksum and send it back for verification if EDID_TEST is requested. Signed-off-by: Mikita Lipski Acked-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 30 +++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index c6ce2b7123b7..09e056a64708 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -885,10 +885,34 @@ enum dc_edid_status dm_helpers_read_local_edid( DRM_ERROR("EDID err: %d, on connector: %s", edid_status, aconnector->base.name); + if (link->aux_mode) { + union test_request test_request = {0}; + union test_response test_response = {0}; - /* DP Compliance Test 4.2.2.3 */ - if (link->aux_mode) - drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]); + dm_helpers_dp_read_dpcd(ctx, + link, + DP_TEST_REQUEST, + &test_request.raw, + sizeof(union test_request)); + + if (!test_request.bits.EDID_READ) + return edid_status; + + test_response.bits.EDID_CHECKSUM_WRITE = 1; + + dm_helpers_dp_write_dpcd(ctx, + link, + DP_TEST_EDID_CHECKSUM, + &sink->dc_edid.raw_edid[sink->dc_edid.length-1], + 1); + + dm_helpers_dp_write_dpcd(ctx, + link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + + } return edid_status; } From 0c316556d124916e1dc2be171b3414b764972802 Mon Sep 17 00:00:00 2001 From: Tianci Yin Date: Mon, 6 Feb 2023 15:58:46 +0800 Subject: [PATCH 036/861] drm/amd/display: Disable migration to ensure consistency of per-CPU variable [why] Since the variable fpu_recursion_depth is per-CPU type, it has one copy on each CPU, thread migration causes data consistency issue, then the call trace shows up. And preemption disabling can't prevent migration. [how] Disable migration to ensure consistency of fpu_recursion_depth. 
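The resulting bracketing on x86 (lifted from the dc_fpu.c hunk below) pins
the task to one CPU for the whole FPU region, so the per-CPU
fpu_recursion_depth counter is incremented and decremented on the same CPU:

/* x86 path after this change; the PPC64 path is bracketed the same way */
migrate_disable();   /* pin to the current CPU */
kernel_fpu_begin();
/* ... DML floating-point code runs here ... */
kernel_fpu_end();
migrate_enable();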
Reviewed-by: Aurabindo Pillai Signed-off-by: Tianci Yin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index 1743ca0a3641..c42aa947c969 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -89,6 +89,7 @@ void dc_fpu_begin(const char *function_name, const int line) if (*pcpu == 1) { #if defined(CONFIG_X86) + migrate_disable(); kernel_fpu_begin(); #elif defined(CONFIG_PPC64) if (cpu_has_feature(CPU_FTR_VSX_COMP)) { @@ -129,6 +130,7 @@ void dc_fpu_end(const char *function_name, const int line) if (*pcpu <= 0) { #if defined(CONFIG_X86) kernel_fpu_end(); + migrate_enable(); #elif defined(CONFIG_PPC64) if (cpu_has_feature(CPU_FTR_VSX_COMP)) { disable_kernel_vsx(); From cd8f067a46d34dee3188da184912ae3d64d98444 Mon Sep 17 00:00:00 2001 From: Wesley Chalmers Date: Wed, 10 Jun 2020 11:49:16 -0400 Subject: [PATCH 037/861] drm/amd/display: Add logging for display MALL refresh setting [WHY] Add log entry for when display refresh from MALL settings are sent to SMU. Fixes: 1664641ea946 ("drm/amd/display: Add logger for SMU msg") Signed-off-by: Wesley Chalmers Acked-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index 1fbf1c105dc1..bdbf18306698 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -312,6 +312,9 @@ void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, b /* bits 8:7 for cache timer scale, bits 6:1 for cache timer delay, bit 0 = 1 for enable, = 0 for disable */ uint32_t param = (cache_timer_scale << 7) | (cache_timer_delay << 1) | (enable ? 1 : 0); + smu_print("SMU Set display refresh from mall: enable = %d, cache_timer_delay = %d, cache_timer_scale = %d\n", + enable, cache_timer_delay, cache_timer_scale); + dcn30_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_SetDisplayRefreshFromMall, param, NULL); } From 3306ba4b60b2f3d9ac6bddc587a4d702e1ba2224 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 18 Apr 2023 00:07:18 +0200 Subject: [PATCH 038/861] drm/amd/display: fix is_timing_changed() prototype Three functions in the amdgpu display driver cause -Wmissing-prototype warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_resource.c:1858:6: error: no previous prototype for 'is_timing_changed' [-Werror=missing-prototypes] is_timing_changed() is actually meant to be a global symbol, but needs a proper name and prototype. 
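A minimal illustration of the warning class being fixed (not driver code):

/* foo.c */
int foo(void)   /* -Wmissing-prototypes: no previous prototype for 'foo' */
{
	return 0;
}
/* Fix: either mark the function static, or declare it in a header that
 * foo.c includes -- the latter is what this patch does via dc.h. */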
Fixes: 17ce8a6907f7 ("drm/amd/display: Add dsc pre-validation in atomic check") Reviewed-by: Aurabindo Pillai Signed-off-by: Arnd Bergmann Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 5 ++--- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 +++--- drivers/gpu/drm/amd/display/dc/dc.h | 3 +++ 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 810ab682f424..46d0a8f57e55 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -45,8 +45,7 @@ #endif #include "dc/dcn20/dcn20_resource.h" -bool is_timing_changed(struct dc_stream_state *cur_stream, - struct dc_stream_state *new_stream); + #define PEAK_FACTOR_X1000 1006 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, @@ -1422,7 +1421,7 @@ int pre_validate_dsc(struct drm_atomic_state *state, struct dc_stream_state *stream = dm_state->context->streams[i]; if (local_dc_state->streams[i] && - is_timing_changed(stream, local_dc_state->streams[i])) { + dc_is_timing_changed(stream, local_dc_state->streams[i])) { DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i); } else { int ind = find_crtc_index_in_state_by_stream(state, stream); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 117d80cb36fb..2f704e26219c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1858,7 +1858,7 @@ bool dc_add_all_planes_for_stream( return add_all_planes_for_stream(dc, stream, &set, 1, context); } -bool is_timing_changed(struct dc_stream_state *cur_stream, +bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream) { if (cur_stream == NULL) @@ -1883,7 +1883,7 @@ static bool are_stream_backends_same( if (stream_a == NULL || stream_b == NULL) return false; - if (is_timing_changed(stream_a, stream_b)) + if (dc_is_timing_changed(stream_a, stream_b)) return false; if (stream_a->signal != stream_b->signal) @@ -3508,7 +3508,7 @@ bool pipe_need_reprogram( if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc) return true; - if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream)) + if (dc_is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream)) return true; if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3b53f36029d0..edef5d181590 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -2228,4 +2228,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, /* Disable acc mode Interfaces */ void dc_disable_accelerated_mode(struct dc *dc); +bool dc_is_timing_changed(struct dc_stream_state *cur_stream, + struct dc_stream_state *new_stream); + #endif /* DC_INTERFACE_H_ */ From 128c1ca0303fe764a4cde5f761e72810d9e40b6e Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 29 Apr 2022 20:41:10 -0400 Subject: [PATCH 039/861] drm/amd/display: Update DTBCLK for DCN32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why&How] - Implement interface to program DTBCLK DTO’s according to reference DTBCLK returned by PMFW - This is required because DTO programming requires exact DTBCLK 
reference freq or it could result in underflow Acked-by: Aurabindo Pillai Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index ea753f8fa175..2b8a81b6d53b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -233,6 +233,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) DC_FP_END(); } +static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, + struct dc_state *context, + int ref_dtbclk_khz) +{ + struct dccg *dccg = clk_mgr->dccg; + uint32_t tg_mask = 0; + int i; + + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct dtbclk_dto_params dto_params = {0}; + + /* use mask to program DTO once per tg */ + if (pipe_ctx->stream_res.tg && + !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) { + tg_mask |= (1 << pipe_ctx->stream_res.tg->inst); + + dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; + dto_params.ref_dtbclk_khz = ref_dtbclk_khz; + + dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params); + //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params); + } + } +} + /* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming), * update DPPCLK to be the exact frequency that will be set after the DPPCLK * divider is updated. This will prevent rounding issues that could cause DPP @@ -570,6 +596,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* DCCG requires KHz precision for DTBCLK */ clk_mgr_base->clks.ref_dtbclk_khz = dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz)); + dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz); } if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { From 97041ed37718dc9ba30aa23ca74093dc93ac89fb Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Thu, 13 Apr 2023 16:22:51 +0200 Subject: [PATCH 040/861] drm/amdgpu: Increase GFX6 graphics ring size. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To ensure it supports 192 IBs per submission, so we can keep a simplified IB limit in the follow up patch without having to look at IP or GPU version. Reviewed-by: Christian König Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index c41219e23151..d9ce4d1c50e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3073,7 +3073,7 @@ static int gfx_v6_0_sw_init(void *handle) ring = &adev->gfx.gfx_ring[i]; ring->ring_obj = NULL; sprintf(ring->name, "gfx"); - r = amdgpu_ring_init(adev, ring, 1024, + r = amdgpu_ring_init(adev, ring, 2048, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, AMDGPU_RING_PRIO_DEFAULT, NULL); From c30ddcece3a0a86853862a7d92678a79525ca1fb Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Thu, 13 Apr 2023 16:22:52 +0200 Subject: [PATCH 041/861] drm/amdgpu: Add a max ibs per submission limit. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And ensure each ring supports that many submissions. This makes sure that we don't get surprises after the submission has been scheduled where the ring allocation actually gets rejected. My calculations on the existing limits: COMPUTE v10: 128 COMPUTE v11: 128 COMPUTE v6: 157 COMPUTE v7: 133 COMPUTE v8: 130 COMPUTE v9: 125 GFX v10: 208 GFX v11: 213 GFX v6: 154 (doubling this in the previous patch) GFX v7: 226 GFX v8: 213 GFX v9: 208 GFX v9 (SW): 208 SDMA CIK: 87 SDMA SI: 97 SDMA v2.4: 74 SDMA v3.0: 74 SDMA v4.0: 72 SDMA v5.0: 51 SDMA v6.0: 52 UVD ENC v6.0: 98 UVD ENC v7.0: 92 UVD v3.1: 124 UVD v4.2: 124 UVD v5.0: 83 UVD v6.0 (VM): 55 UVD v7.0: 51 VCE v2.0: 126 VCE v3.0 (VM): 98 VCE v4.0: 93 VCN DEC v1.0: 49 VCN DEC v2.0: 51 VCN DEC v3.0: 51 VCN ENC v1.0: 58 VCN ENC v2.0: 93 VCN ENC v3.0: 93 VCN ENC v4.0: 93 VCN JPEG v1.0: 17 VCN JPEG v2.0: 16 VCN JPEG v2.5: 17 VCN JPEG v3.0: 17 VCN JPEG v4.0: 17 Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2498 Reviewed-by: Christian König Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 29 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 + 3 files changed, 33 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 08eced097bd8..d8b3c9198d33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -112,6 +112,9 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p, if (r < 0) return r; + if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type)) + return -EINVAL; + ++(num_ibs[r]); p->gang_leader_idx = r; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index dc474b809604..f676c236b657 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -49,6 +49,26 @@ * them until the pointers are equal again. */ +/** + * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission. + * + * @type: ring type for which to return the limit. + */ +unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type) +{ + switch (type) { + case AMDGPU_RING_TYPE_GFX: + /* Need to keep at least 192 on GFX7+ for old radv. 
*/ + return 192; + case AMDGPU_RING_TYPE_COMPUTE: + return 125; + case AMDGPU_RING_TYPE_VCN_JPEG: + return 16; + default: + return 49; + } +} + /** * amdgpu_ring_alloc - allocate space on the ring buffer * @@ -182,6 +202,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, int sched_hw_submission = amdgpu_sched_hw_submission; u32 *num_sched; u32 hw_ip; + unsigned int max_ibs_dw; /* Set the hw submission limit higher for KIQ because * it's used for a number of gfx/compute tasks by both @@ -290,6 +311,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } + max_ibs_dw = ring->funcs->emit_frame_size + + amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size; + max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask; + + if (WARN_ON(max_ibs_dw > max_dw)) { + max_dw = max_ibs_dw; + } + ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); ring->buf_mask = (ring->ring_size / 4) - 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index d8749444b689..8eca6532ed19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -319,6 +319,7 @@ struct amdgpu_ring { #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r) +unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); void amdgpu_ring_ib_begin(struct amdgpu_ring *ring); void amdgpu_ring_ib_end(struct amdgpu_ring *ring); From 4f18b9a6711adbc7c76993c734a94ee3f5c61791 Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Thu, 13 Apr 2023 16:22:53 +0200 Subject: [PATCH 042/861] drm/amdgpu: Add support for querying the max ibs in a submission. (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This info would be used by radv to figure out when we need to split a submission into multiple submissions. radv currently has a limit of 192 which seems to work for most gfx submissions, but is way too high for e.g. compute or sdma. Userspace is available at https://gitlab.freedesktop.org/bnieuwenhuizen/mesa/-/commits/ib-rejection-v3 v3: Completely rewrote based on suggestion of making it a separate query. Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2498 Reviewed-by: Christian König Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 9 +++++++++ include/uapi/drm/amdgpu_drm.h | 2 ++ 2 files changed, 11 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 0efb38539d70..1a2e342af1c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1140,6 +1140,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) kfree(caps); return r; } + case AMDGPU_INFO_MAX_IBS: { + uint32_t max_ibs[AMDGPU_HW_IP_NUM]; + + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) + max_ibs[i] = amdgpu_ring_max_ibs(i); + + return copy_to_user(out, max_ibs, + min((size_t)size, sizeof(max_ibs))) ? 
-EFAULT : 0; + } default: DRM_DEBUG_KMS("Invalid request %d\n", info->query); return -EINVAL; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index b6eb90df5d05..6981e59a9401 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -876,6 +876,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0 /* Subquery id: Encode */ #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1 +/* Query the max number of IBs per gang per submission */ +#define AMDGPU_INFO_MAX_IBS 0x22 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff From 7f102a906681cddb8ababe53e0caa40a17f4cd11 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 17 Apr 2023 18:42:37 +0100 Subject: [PATCH 043/861] drm/amd/pm: Fix spelling mistake "aquire" -> "acquire" There is a spelling mistake in the smu_i2c_bus_access prototype. Fix it. Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 5ce433e2c16a..f1580a26a850 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -359,7 +359,7 @@ struct pp_hwmgr_func { int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks); int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state); int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode); - int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire); + int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool acquire); int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state); int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate); int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr, From 1fa8d710573f02ae9118bc5f53e7ede09d6920da Mon Sep 17 00:00:00 2001 From: Alan Liu Date: Fri, 14 Apr 2023 18:39:52 +0800 Subject: [PATCH 044/861] drm/amdgpu: Fix desktop freeze after gpu-reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why] After gpu-reset, the driver sometimes fails to enable the vblank irq, causing flip_done to time out and the desktop to freeze. During gpu-reset, we disable and enable the vblank irq in dm_suspend() and dm_resume(). Later on, in amdgpu_irq_gpu_reset_resume_helper(), we check each irq's refcount and decide whether to enable or disable it again. However, we have two sets of APIs for controlling the vblank irq: one is dm_vblank_get/put() and the other is amdgpu_irq_get/put(). Each API has its own refcount and flag to store the state of the vblank irq, and they are not synchronized. In drm we use the first API to control the vblank irq, but in amdgpu_irq_gpu_reset_resume_helper() we use the second set. The failure happens when the vblank irq was enabled by dm_vblank_get() before gpu-reset, so vblank->enabled is true. During gpu-reset, however, the vblank irq state that amdgpu_irq_gpu_reset_resume_helper() reads via amdgpu_irq_update() is DISABLED, so it disables the vblank irq again. After gpu-reset, if there is a cursor plane commit, the driver tries to enable the vblank irq by calling drm_vblank_enable(), but vblank->enabled is still true, so it fails to turn the irq on; flip_done then can't be completed in the vblank irq handler and the desktop freezes. 
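To make the desync concrete, below is a minimal standalone sketch of the failure mode (illustrative only; dm_refcount and amdgpu_refcount are stand-in names for the two unsynchronized counters, not the driver's actual fields):

	#include <stdio.h>

	static int dm_refcount;        /* models the drm_vblank_get/put() side   */
	static int amdgpu_refcount;    /* models the amdgpu_irq_get/put() side   */

	int main(void)
	{
		/* drm enables vblank before gpu-reset: only its own count moves */
		dm_refcount++;

		/* the reset helper consults only the amdgpu-side refcount ... */
		if (amdgpu_refcount == 0)
			printf("reset helper leaves the vblank irq disabled\n");

		/* ... while drm still sees its count held, so it never calls down
		 * to re-enable the irq and flip_done times out */
		if (dm_refcount > 0)
			printf("drm believes vblank is already enabled\n");

		return 0;
	}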
[How] Combine the two vblank control APIs by letting drm's API ultimately call amdgpu_irq's API, so the irq refcount and state of both APIs stay synchronized. Also add a check in amdgpu_irq_put() to prevent the refcount from dropping below 0. v2: - Add warning in amdgpu_irq_enable() if the irq is already disabled. - Call dc_interrupt_set() in dm_set_vblank() to avoid refcount change if it is in gpu-reset. v3: - Improve commit message and code comments. Signed-off-by: Alan Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3 +++ .../drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 17 ++++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index d0a1cc88832c..fafebec5b7b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -596,6 +596,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, if (!src->enabled_types || !src->funcs->set) return -EINVAL; + if (WARN_ON(!amdgpu_irq_enabled(adev, src, type))) + return -EINVAL; + if (atomic_dec_and_test(&src->enabled_types[type])) return amdgpu_irq_update(adev, src, type); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 1d924dc51a3e..e3762e806617 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -169,10 +169,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (rc) return rc; - irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + if (amdgpu_in_reset(adev)) { + irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + /* During gpu-reset we disable and then enable vblank irq, so + * don't use amdgpu_irq_get/put() to avoid refcount change. + */ + if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + rc = -EBUSY; + } else { + rc = (enable) + ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) + : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); + } - if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + return -EBUSY; + if (rc) + return rc; skip: if (amdgpu_in_reset(adev)) From 94344e62a9ce8abcf681390f9822a7b075cf98e2 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Fri, 14 Apr 2023 11:08:30 -0400 Subject: [PATCH 045/861] drm/amd/display: remove unused variable oldest_index cppcheck reports drivers/gpu/drm/amd/display/modules/freesync/freesync.c:1143:17: style: Variable 'oldest_index' is assigned a value that is never used. [unreadVariable] oldest_index = 0; ^ This variable is not used, so remove it. 
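The report can be reproduced with a plain style pass, e.g. cppcheck --enable=style drivers/gpu/drm/amd/display/modules/freesync/freesync.c (exact flag spelling may vary between cppcheck versions).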
Signed-off-by: Tom Rix Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 5c41a4751db4..5798c0eafa1f 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -1137,10 +1137,6 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync, if (in_out_vrr->supported && in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE) { - unsigned int oldest_index = plane->time.index + 1; - - if (oldest_index >= DC_PLANE_UPDATE_TIMES_MAX) - oldest_index = 0; last_render_time_in_us = curr_time_stamp_in_us - plane->time.prev_update_time_in_us; From 20c3dffdccbd494e0dd631d1660aeecbff6775f2 Mon Sep 17 00:00:00 2001 From: Nikita Zhandarovich Date: Thu, 13 Apr 2023 08:12:28 -0700 Subject: [PATCH 046/861] radeon: avoid double free in ci_dpm_init() Several calls to ci_dpm_fini() will attempt to free resources that either have been freed before or haven't been allocated yet. This may lead to undefined or dangerous behaviour. For instance, if r600_parse_extended_power_table() fails, it might call r600_free_extended_power_table() as will ci_dpm_fini() later during error handling. Fix this by only freeing pointers to objects previously allocated. Found by Linux Verification Center (linuxtesting.org) with static analysis tool SVACE. Fixes: cc8dbbb4f62a ("drm/radeon: add dpm support for CI dGPUs (v2)") Co-developed-by: Natalia Petrova Signed-off-by: Nikita Zhandarovich Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/ci_dpm.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 8ef25ab305ae..b8f4dac68d85 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5517,6 +5517,7 @@ static int ci_parse_power_table(struct radeon_device *rdev) u8 frev, crev; u8 *power_state_offset; struct ci_ps *ps; + int ret; if (!atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) @@ -5546,11 +5547,15 @@ static int ci_parse_power_table(struct radeon_device *rdev) non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; - if (!rdev->pm.power_state[i].clock_info) - return -EINVAL; + if (!rdev->pm.power_state[i].clock_info) { + ret = -EINVAL; + goto err_free_ps; + } ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); - if (ps == NULL) - return -ENOMEM; + if (ps == NULL) { + ret = -ENOMEM; + goto err_free_ps; + } rdev->pm.dpm.ps[i].ps_priv = ps; ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info, @@ -5590,6 +5595,12 @@ static int ci_parse_power_table(struct radeon_device *rdev) } return 0; + +err_free_ps: + for (i = 0; i < rdev->pm.dpm.num_ps; i++) + kfree(rdev->pm.dpm.ps[i].ps_priv); + kfree(rdev->pm.dpm.ps); + return ret; } static int ci_get_vbios_boot_values(struct radeon_device *rdev, @@ -5678,25 +5689,26 @@ int ci_dpm_init(struct radeon_device *rdev) ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state); if (ret) { - ci_dpm_fini(rdev); + kfree(rdev->pm.dpm.priv); return ret; } ret = r600_get_platform_caps(rdev); if (ret) { - ci_dpm_fini(rdev); + kfree(rdev->pm.dpm.priv); return ret; } ret = r600_parse_extended_power_table(rdev); if 
(ret) { - ci_dpm_fini(rdev); + kfree(rdev->pm.dpm.priv); return ret; } ret = ci_parse_power_table(rdev); if (ret) { - ci_dpm_fini(rdev); + kfree(rdev->pm.dpm.priv); + r600_free_extended_power_table(rdev); return ret; } From 277bd3371f11400d5b02df54f057569be4b10cea Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 24 May 2022 10:51:43 +0800 Subject: [PATCH 047/861] drm/amdgpu: convert gfx.kiq to array type (v3) v1: more kiq instances are a available in SOC (Le) v2: squash commits to avoid breaking the build (Le) v3: make the conversion for gfx/mec v11_0 (Hawking) Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 6 ++-- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 6 ++-- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 6 ++-- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 34 +++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 32 ++++++++--------- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 26 +++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 16 ++++----- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 24 ++++++------- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 12 +++---- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 12 +++---- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12 +++---- drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 22 ++++++------ drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 26 +++++++------- 16 files changed, 122 insertions(+), 122 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 9378fc79e9ea..f599e1e74fcc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -288,7 +288,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v10_compute_mqd *m; uint32_t mec, pipe; int r; @@ -303,7 +303,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -330,7 +330,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); release_queue(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index ba21ec6b35e0..5c4152ae44da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -275,7 +275,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v10_compute_mqd *m; uint32_t mec, pipe; int r; @@ -290,7 +290,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - 
spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -317,7 +317,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); release_queue(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 7e80caa05060..5cdb7289d35b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -260,7 +260,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v11_compute_mqd *m; uint32_t mec, pipe; int r; @@ -275,7 +275,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -302,7 +302,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); release_queue(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index e92b93557c13..bc944ae4fd5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -300,7 +300,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v9_mqd *m; uint32_t mec, pipe; int r; @@ -315,7 +315,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -342,7 +342,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); release_queue(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9d3a0542c996..9b6071df1fa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_irq_src *irq) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; int r = 0; spin_lock_init(&kiq->ring_lock); @@ -329,7 +329,7 @@ void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL); } @@ -339,7 +339,7 @@ int amdgpu_gfx_kiq_init(struct 
amdgpu_device *adev, { int r; u32 *hpd; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj, @@ -368,7 +368,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, int r, i; /* create MQD for KIQ */ - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; if (!adev->enable_mes_kiq && !ring->mqd_obj) { /* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must * otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD @@ -458,7 +458,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) &ring->mqd_ptr); } - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, @@ -467,17 +467,17 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; int i, r = 0; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_compute_rings)) { - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return -ENOMEM; } @@ -485,9 +485,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], RESET_QUEUES, 0, 0); - if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang) + if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return r; } @@ -507,8 +507,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; uint64_t queue_mask = 0; int r, i; @@ -532,13 +532,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + kiq->pmf->set_resources_size); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return r; } @@ -550,7 +550,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); if (r) DRM_ERROR("KCQ enable failed\n"); @@ -788,7 +788,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) signed long r, cnt = 0; unsigned long flags; uint32_t seq, reg_val_offs = 0, value = 0; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *ring = &kiq->ring; if (amdgpu_device_skip_hw_access(adev)) @@ -856,7 +856,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, 
uint32_t v) signed long r, cnt = 0; unsigned long flags; uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *ring = &kiq->ring; BUG_ON(!ring->funcs->emit_wreg); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index bfabea76d166..c742b4a36979 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -296,7 +296,7 @@ struct amdgpu_gfx { struct amdgpu_ce ce; struct amdgpu_me me; struct amdgpu_mec mec; - struct amdgpu_kiq kiq; + struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_imu imu; bool rs64_enable; /* firmware format */ const struct firmware *me_fw; /* ME firmware */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index f2e2cbaa7fde..9dd474262c29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -74,7 +74,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *ring = &kiq->ring; signed long r, cnt = 0; unsigned long flags; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f5b5ce1051a2..d4e7de8fd9da 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3568,7 +3568,7 @@ static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, struct amdgpu_device *adev = kiq_ring->adev; uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; - if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) { + if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); return; } @@ -3636,7 +3636,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = { static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs; + adev->gfx.kiq[0].pmf = &gfx_v10_0_kiq_pm4_funcs; } static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev) @@ -4550,7 +4550,7 @@ static int gfx_v10_0_sw_init(void *handle) /* KIQ event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT, - &adev->gfx.kiq.irq); + &adev->gfx.kiq[0].irq); if (r) return r; @@ -4635,7 +4635,7 @@ static int gfx_v10_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq; + kiq = &adev->gfx.kiq[0]; r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); if (r) return r; @@ -4693,7 +4693,7 @@ static int gfx_v10_0_sw_fini(void *handle) amdgpu_gfx_mqd_sw_fini(adev); if (!adev->enable_mes_kiq) { - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev); } @@ -6214,7 +6214,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) CP_MEC_CNTL__MEC_ME2_HALT_MASK)); break; } - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); } @@ -6524,8 +6524,8 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) #ifndef BRING_UP_DEBUG static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = 
&adev->gfx.kiq[0].ring; int r, i; if (!kiq->pmf || !kiq->pmf->kiq_map_queues) @@ -6885,7 +6885,7 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -7243,7 +7243,7 @@ static int gfx_v10_0_hw_init(void *handle) #ifndef BRING_UP_DEBUG static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; int i; @@ -8640,7 +8640,7 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; unsigned long flags; @@ -9148,7 +9148,7 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { uint32_t tmp, target; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); + struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); if (ring->me == 1) target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); @@ -9192,7 +9192,7 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { u8 me_id, pipe_id, queue_id; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); + struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; @@ -9369,7 +9369,7 @@ static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq; + adev->gfx.kiq[0].ring.funcs = &gfx_v10_0_ring_funcs_kiq; for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx; @@ -9403,8 +9403,8 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs; - adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; - adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs; + adev->gfx.kiq[0].irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; + adev->gfx.kiq[0].irq.funcs = &gfx_v10_0_kiq_irq_funcs; adev->gfx.priv_reg_irq.num_types = 1; adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index a9da0486467a..6a5435255e6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -192,7 +192,7 @@ static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, struct amdgpu_device *adev = kiq_ring->adev; uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 
4 : 0; - if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) { + if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); return; } @@ -260,7 +260,7 @@ static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = { static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs; + adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs; } static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) @@ -1395,7 +1395,7 @@ static int gfx_v11_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq; + kiq = &adev->gfx.kiq[0]; r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); if (r) return r; @@ -1466,7 +1466,7 @@ static int gfx_v11_0_sw_fini(void *handle) amdgpu_gfx_mqd_sw_fini(adev); if (!adev->enable_mes_kiq) { - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev); } @@ -3337,7 +3337,7 @@ static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); } - adev->gfx.kiq.ring.sched.ready = enable; + adev->gfx.kiq[0].ring.sched.ready = enable; udelay(50); } @@ -3732,8 +3732,8 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) #ifndef BRING_UP_DEBUG static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; int r, i; if (!kiq->pmf || !kiq->pmf->kiq_map_queues) @@ -4108,7 +4108,7 @@ static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -4417,7 +4417,7 @@ static int gfx_v11_0_hw_init(void *handle) #ifndef BRING_UP_DEBUG static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; int i, r = 0; @@ -4432,7 +4432,7 @@ static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], PREEMPT_QUEUES, 0, 0); - if (adev->gfx.kiq.ring.sched.ready) + if (adev->gfx.kiq[0].ring.sched.ready) r = amdgpu_ring_test_helper(kiq_ring); return r; @@ -5622,7 +5622,7 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; unsigned long flags; @@ -6120,7 +6120,7 @@ static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { uint32_t tmp, target; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); + struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); target += ring->pipe; @@ -6317,7 +6317,7 @@ static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq; + adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq; for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b1f2684d854a..ed04bad8543d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2021,7 +2021,7 @@ static int gfx_v8_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq; + kiq = &adev->gfx.kiq[0]; r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); if (r) return r; @@ -2051,7 +2051,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev); gfx_v8_0_mec_fini(adev); @@ -4292,7 +4292,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32(mmCP_MEC_CNTL, 0); } else { WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); } @@ -4314,7 +4314,7 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring) static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; uint64_t queue_mask = 0; int r, i; @@ -4678,7 +4678,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -4741,7 +4741,7 @@ static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev) if (r) return r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_ring_test_helper(ring); if (r) return r; @@ -4808,7 +4808,7 @@ static int gfx_v8_0_hw_init(void *handle) static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev) { int r, i; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings); if (r) @@ -7001,7 +7001,7 @@ static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq; + adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq; for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index adbcd8127c82..adf86bc7ed36 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -898,7 +898,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = { static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs; + adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs; } static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) @@ -2174,7 +2174,7 @@ static int gfx_v9_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq; + kiq = &adev->gfx.kiq[0]; r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); if (r) return r; @@ -2216,7 +2216,7 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev); gfx_v9_0_mec_fini(adev); @@ -3155,7 +3155,7 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) } else { WREG32_SOC15_RLC(GC, 0, 
mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); } @@ -3610,7 +3610,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -3789,10 +3789,10 @@ static int gfx_v9_0_hw_fini(void *handle) */ if (!amdgpu_in_reset(adev) && !adev->in_suspend) { mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, adev->gfx.kiq.ring.me, - adev->gfx.kiq.ring.pipe, - adev->gfx.kiq.ring.queue, 0); - gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring); + soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me, + adev->gfx.kiq[0].ring.pipe, + adev->gfx.kiq[0].ring.queue, 0); + gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring); soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } @@ -3913,7 +3913,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) unsigned long flags; uint32_t seq, reg_val_offs = 0; uint64_t value = 0; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *ring = &kiq->ring; BUG_ON(!ring->funcs->emit_rreg); @@ -5385,7 +5385,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; struct amdgpu_ring *kiq_ring = &kiq->ring; unsigned long flags; @@ -6964,7 +6964,7 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) { int i; - adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq; + adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq; for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 7d6f4a68f416..23d4081eca00 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -343,7 +343,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* For SRIOV run time, driver shouldn't access the register through MMIO * Directly use kiq to do the vm invalidation instead */ - if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes && + if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && down_read_trylock(&adev->reset_domain->sem)) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; @@ -428,11 +428,11 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t queried_pasid; bool ret; u32 usec_timeout = amdgpu_sriov_vf(adev) ? 
SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; if (amdgpu_emu_mode == 0 && ring->sched.ready) { - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); /* 2 dwords flush + 8 dwords fence */ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); kiq->pmf->kiq_invalidate_tlbs(ring, @@ -440,12 +440,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) { amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return -ETIME; } amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index d809f2ed5600..3828ca95899f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -291,7 +291,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* For SRIOV run time, driver shouldn't access the register through MMIO * Directly use kiq to do the vm invalidation instead */ - if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) && + if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; const unsigned eng = 17; @@ -329,11 +329,11 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint32_t seq; uint16_t queried_pasid; bool ret; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; if (amdgpu_emu_mode == 0 && ring->sched.ready) { - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); /* 2 dwords flush + 8 dwords fence */ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); kiq->pmf->kiq_invalidate_tlbs(ring, @@ -341,12 +341,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) { amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); return -ETIME; } amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 64ab1a306dfe..290804a06e05 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -824,7 +824,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal */ - if (adev->gfx.kiq.ring.sched.ready && + if (adev->gfx.kiq[0].ring.sched.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && down_read_trylock(&adev->reset_domain->sem)) { uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; @@ -934,8 
+934,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t queried_pasid; bool ret; u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; if (amdgpu_in_reset(adev)) return -EIO; @@ -955,7 +955,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (vega20_xgmi_wa) ndw += kiq->pmf->invalidate_tlbs_size; - spin_lock(&adev->gfx.kiq.ring_lock); + spin_lock(&adev->gfx.kiq[0].ring_lock); /* 2 dwords flush + 8 dwords fence */ amdgpu_ring_alloc(ring, ndw); if (vega20_xgmi_wa) @@ -966,13 +966,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) { amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); up_read(&adev->reset_domain->sem); return -ETIME; } amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); + spin_unlock(&adev->gfx.kiq[0].ring_lock); r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 2e2062636d5f..0599f8a6813e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -797,8 +797,8 @@ static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring) static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; int r; if (!kiq->pmf || !kiq->pmf->kiq_map_queues) @@ -863,9 +863,9 @@ static int mes_v10_1_kiq_ring_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - spin_lock_init(&adev->gfx.kiq.ring_lock); + spin_lock_init(&adev->gfx.kiq[0].ring_lock); - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; ring->me = 3; ring->pipe = 1; @@ -891,7 +891,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev, struct amdgpu_ring *ring; if (pipe == AMDGPU_MES_KIQ_PIPE) - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; else if (pipe == AMDGPU_MES_SCHED_PIPE) ring = &adev->mes.ring; else @@ -974,15 +974,15 @@ static int mes_v10_1_sw_fini(void *handle) amdgpu_ucode_release(&adev->mes.fw[pipe]); } - amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj, - &adev->gfx.kiq.ring.mqd_gpu_addr, - &adev->gfx.kiq.ring.mqd_ptr); + amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj, + &adev->gfx.kiq[0].ring.mqd_gpu_addr, + &adev->gfx.kiq[0].ring.mqd_ptr); amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj, &adev->mes.ring.mqd_gpu_addr, &adev->mes.ring.mqd_ptr); - amdgpu_ring_fini(&adev->gfx.kiq.ring); + amdgpu_ring_fini(&adev->gfx.kiq[0].ring); amdgpu_ring_fini(&adev->mes.ring); amdgpu_mes_fini(adev); @@ -1038,7 +1038,7 @@ static int mes_v10_1_kiq_hw_init(struct amdgpu_device *adev) mes_v10_1_enable(adev, true); - mes_v10_1_kiq_setting(&adev->gfx.kiq.ring); + mes_v10_1_kiq_setting(&adev->gfx.kiq[0].ring); r = mes_v10_1_queue_init(adev); if (r) @@ -1090,7 +1090,7 @@ static int mes_v10_1_hw_init(void *handle) * MES uses KIQ ring exclusively so driver cannot access KIQ ring * with MES enabled. 
*/ - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; adev->mes.ring.sched.ready = true; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 45280f047180..e853bcb892fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -864,8 +864,8 @@ static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring) static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; int r; if (!kiq->pmf || !kiq->pmf->kiq_map_queues) @@ -894,7 +894,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev, int r; if (pipe == AMDGPU_MES_KIQ_PIPE) - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; else if (pipe == AMDGPU_MES_SCHED_PIPE) ring = &adev->mes.ring; else @@ -961,9 +961,9 @@ static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - spin_lock_init(&adev->gfx.kiq.ring_lock); + spin_lock_init(&adev->gfx.kiq[0].ring_lock); - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; ring->me = 3; ring->pipe = 1; @@ -989,7 +989,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev, struct amdgpu_ring *ring; if (pipe == AMDGPU_MES_KIQ_PIPE) - ring = &adev->gfx.kiq.ring; + ring = &adev->gfx.kiq[0].ring; else if (pipe == AMDGPU_MES_SCHED_PIPE) ring = &adev->mes.ring; else @@ -1074,15 +1074,15 @@ static int mes_v11_0_sw_fini(void *handle) amdgpu_ucode_release(&adev->mes.fw[pipe]); } - amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj, - &adev->gfx.kiq.ring.mqd_gpu_addr, - &adev->gfx.kiq.ring.mqd_ptr); + amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj, + &adev->gfx.kiq[0].ring.mqd_gpu_addr, + &adev->gfx.kiq[0].ring.mqd_ptr); amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj, &adev->mes.ring.mqd_gpu_addr, &adev->mes.ring.mqd_ptr); - amdgpu_ring_fini(&adev->gfx.kiq.ring); + amdgpu_ring_fini(&adev->gfx.kiq[0].ring); amdgpu_ring_fini(&adev->mes.ring); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { @@ -1175,7 +1175,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) mes_v11_0_enable(adev, true); - mes_v11_0_kiq_setting(&adev->gfx.kiq.ring); + mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring); r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); if (r) @@ -1196,7 +1196,7 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev)) { - mes_v11_0_kiq_dequeue(&adev->gfx.kiq.ring); + mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring); mes_v11_0_kiq_clear(adev); } @@ -1244,7 +1244,7 @@ static int mes_v11_0_hw_init(void *handle) * MES uses KIQ ring exclusively so driver cannot access KIQ ring * with MES enabled. */ - adev->gfx.kiq.ring.sched.ready = false; + adev->gfx.kiq[0].ring.sched.ready = false; adev->mes.ring.sched.ready = true; return 0; From be697aa3a78ef83a6b8d49e1f0671a002e502cd0 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 27 Jul 2022 14:35:49 +0800 Subject: [PATCH 048/861] drm/amdgpu: move queue_bitmap to an independent structure (v3) To allocate independent queue_bitmap for each XCD, then the old bitmap policy can be continued to use with a clear logic. Use mec_bitmap[0] as default for all non-GC 9.4.3 IPs. 
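As a minimal standalone sketch of the resulting indexing (sizes and names are illustrative stand-ins for AMDGPU_MAX_GC_INSTANCES and AMDGPU_MAX_COMPUTE_QUEUES, not the driver's definitions):

	#include <stdbool.h>

	#define NUM_XCD     8	/* stand-in for AMDGPU_MAX_GC_INSTANCES   */
	#define NUM_QUEUES  128	/* stand-in for AMDGPU_MAX_COMPUTE_QUEUES */

	/* one queue bitmap per XCD instead of a single shared one */
	struct mec_bitmap {
		unsigned char queue_bitmap[NUM_QUEUES / 8];
	};

	static struct mec_bitmap mec_bitmap[NUM_XCD];

	/* single-XCD parts (everything but GC 9.4.3) always pass xcc_id 0 */
	static bool queue_enabled(int xcc_id, int queue)
	{
		return mec_bitmap[xcc_id].queue_bitmap[queue / 8] & (1u << (queue % 8));
	}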
v2: squash commits to avoid breaking the build v3: unify naming style Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 39 ++++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 7 +++- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6 +-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 6 +-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 5 ++- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 ++- 9 files changed, 47 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 0385f7f69278..fed8bb9a721f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -162,7 +162,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) * clear */ bitmap_complement(gpu_resources.cp_queue_bitmap, - adev->gfx.mec.queue_bitmap, + adev->gfx.mec_bitmap[0].queue_bitmap, KGD_MAX_QUEUES); /* According to linux/bitmap.h we shouldn't use bitmap_clear if diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index bc944ae4fd5b..03875b971ba6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -778,7 +778,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, * Iterate through the shader engines and arrays of the device * to get number of waves in flight */ - bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap, + bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap, KGD_MAX_QUEUES); max_queue_cnt = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9b6071df1fa7..b300b1784210 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -63,10 +63,10 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, } bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, - int mec, int pipe, int queue) + int xcc_id, int mec, int pipe, int queue) { return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue), - adev->gfx.mec.queue_bitmap); + adev->gfx.mec_bitmap[xcc_id].queue_bitmap); } int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, @@ -204,29 +204,38 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) { - int i, queue, pipe; + int i, j, queue, pipe; bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev); int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe, adev->gfx.num_compute_rings); + int num_xcd = (adev->gfx.num_xcd > 1) ? 
adev->gfx.num_xcd : 1; if (multipipe_policy) { - /* policy: make queues evenly cross all pipes on MEC1 only */ - for (i = 0; i < max_queues_per_mec; i++) { - pipe = i % adev->gfx.mec.num_pipe_per_mec; - queue = (i / adev->gfx.mec.num_pipe_per_mec) % - adev->gfx.mec.num_queue_per_pipe; + /* policy: make queues evenly cross all pipes on MEC1 only + * for multiple xcc, just use the original policy for simplicity */ + for (j = 0; j < num_xcd; j++) { + for (i = 0; i < max_queues_per_mec; i++) { + pipe = i % adev->gfx.mec.num_pipe_per_mec; + queue = (i / adev->gfx.mec.num_pipe_per_mec) % + adev->gfx.mec.num_queue_per_pipe; - set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, - adev->gfx.mec.queue_bitmap); + set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, + adev->gfx.mec_bitmap[j].queue_bitmap); + } } } else { /* policy: amdgpu owns all queues in the given pipe */ - for (i = 0; i < max_queues_per_mec; ++i) - set_bit(i, adev->gfx.mec.queue_bitmap); + for (j = 0; j < num_xcd; j++) { + for (i = 0; i < max_queues_per_mec; ++i) + set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap); + } } - dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); + for (j = 0; j < num_xcd; j++) { + dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", + bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); + } } void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) @@ -268,7 +277,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_queue_per_pipe; while (--queue_bit >= 0) { - if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) + if (test_bit(queue_bit, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); @@ -516,7 +525,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) return -EINVAL; for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec.queue_bitmap)) + if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; /* This situation may be hit in the future if a new HW diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index c742b4a36979..830323310694 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -76,7 +76,9 @@ struct amdgpu_mec { u32 num_pipe_per_mec; u32 num_queue_per_pipe; void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; +}; +struct amdgpu_mec_bitmap { /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); }; @@ -296,6 +298,7 @@ struct amdgpu_gfx { struct amdgpu_ce ce; struct amdgpu_me me; struct amdgpu_mec mec; + struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_imu imu; bool rs64_enable; /* firmware format */ @@ -425,8 +428,8 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue); void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); -bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, - int pipe, int queue); +bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int inst, + int mec, int pipe, int queue); bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d4e7de8fd9da..88f8424ea1e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4219,7 +4219,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr = NULL; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -4614,8 +4614,8 @@ static int gfx_v10_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, - j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v10_0_compute_ring_init(adev, ring_id, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 6a5435255e6d..3e42a44f10a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -699,7 +699,7 @@ static int gfx_v11_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -1374,8 +1374,8 @@ static int gfx_v11_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, - j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v11_0_compute_ring_init(adev, ring_id, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 9d5c1e29b4a3..46740ad9a80f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2728,7 +2728,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -4456,7 +4456,8 @@ static int gfx_v7_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v7_0_compute_ring_init(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ed04bad8543d..18722450e265 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1304,7 +1304,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -2001,7 +2001,8 @@ static int gfx_v8_0_sw_init(void *handle) for (i 
= 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v8_0_compute_ring_init(adev, @@ -4319,7 +4320,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) int r, i; for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec.queue_bitmap)) + if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; /* This situation may be hit in the future if a new HW diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index adf86bc7ed36..49adc36dcc6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1713,7 +1713,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -2154,7 +2154,8 @@ static int gfx_v9_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v9_0_compute_ring_init(adev, From c38be07035bcb31274ce5f85e3b249f691c5b8db Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 16 Nov 2021 21:56:34 +0800 Subject: [PATCH 049/861] drm/amdgpu: separate the mqd_backup for kiq from kcq This will benefit the mqd indexing for kiq/kcq in multi XCD case. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 830323310694..d811cb038e94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -75,7 +75,7 @@ struct amdgpu_mec { u32 num_mec; u32 num_pipe_per_mec; u32 num_queue_per_pipe; - void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; + void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES]; }; struct amdgpu_mec_bitmap { @@ -122,6 +122,7 @@ struct amdgpu_kiq { struct amdgpu_ring ring; struct amdgpu_irq_src irq; const struct kiq_pm4_funcs *pmf; + void *mqd_backup; }; /* From def799c6596d078112095c24c25e162cb5102d90 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 24 May 2022 12:23:03 +0800 Subject: [PATCH 050/861] drm/amdgpu: add multi-xcc support to amdgpu_gfx interfaces (v4) v1: Modify kiq_init/fini, mqd_sw_init/fini and enable/disable_kcq to adapt to multi-die case.
Pass 0 as default to all asics with single xcc (Le) v2: squash commits to avoid breaking the build (Le) v3: unify naming style (Le) v4: apply the changes to gc v11_0 (Hawking) Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 75 ++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 16 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 23 ++++---- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 23 ++++---- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 19 +++---- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 25 ++++----- 6 files changed, 93 insertions(+), 88 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index b300b1784210..7f5c60381103 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -267,7 +267,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) } static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, - struct amdgpu_ring *ring) + struct amdgpu_ring *ring, int xcc_id) { int queue_bit; int mec, pipe, queue; @@ -277,7 +277,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_queue_per_pipe; while (--queue_bit >= 0) { - if (test_bit(queue_bit, adev->gfx.mec_bitmap[0].queue_bitmap)) + if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap)) continue; amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); @@ -303,9 +303,9 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq) + struct amdgpu_irq_src *irq, int xcc_id) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; int r = 0; spin_lock_init(&kiq->ring_lock); @@ -314,15 +314,16 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->ring_obj = NULL; ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.kiq; + ring->xcc_id = xcc_id; ring->vm_hub = AMDGPU_GFXHUB_0; - r = amdgpu_gfx_kiq_acquire(adev, ring); + r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id); if (r) return r; ring->eop_gpu_addr = kiq->eop_gpu_addr; ring->no_scheduler = true; - sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue); + sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue); r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0, AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) @@ -336,19 +337,19 @@ void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) amdgpu_ring_fini(ring); } -void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev) +void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL); } int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, - unsigned hpd_size) + unsigned hpd_size, int xcc_id) { int r; u32 *hpd; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj, @@ -371,13 +372,13 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, /* create MQD for each compute/gfx queue */ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, - unsigned mqd_size) + unsigned mqd_size, int xcc_id) { - struct amdgpu_ring *ring = NULL; int r, 
i; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_ring *ring = &kiq->ring; /* create MQD for KIQ */ - ring = &adev->gfx.kiq[0].ring; if (!adev->enable_mes_kiq && !ring->mqd_obj) { /* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must * otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD @@ -396,8 +397,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, } /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]) + kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL); + if (!kiq->mqd_backup) dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); } @@ -424,7 +425,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, /* create MQD for each KCQ */ for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; + ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; if (!ring->mqd_obj) { r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, @@ -435,7 +436,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, } /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); + adev->gfx.mec.mqd_backup[i + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.mec.mqd_backup[i]) dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); } @@ -444,10 +445,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, return 0; } -void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) +void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring = NULL; - int i; + int i, j; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { for (i = 0; i < adev->gfx.num_gfx_rings; i++) { @@ -460,6 +462,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) } for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; ring = &adev->gfx.compute_ring[i]; kfree(adev->gfx.mec.mqd_backup[i]); amdgpu_bo_free_kernel(&ring->mqd_obj, @@ -467,36 +470,40 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) &ring->mqd_ptr); } - ring = &adev->gfx.kiq[0].ring; + ring = &kiq->ring; + kfree(kiq->mqd_backup); kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, &ring->mqd_ptr); } -int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) +int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; struct amdgpu_ring *kiq_ring = &kiq->ring; int i, r = 0; + int j; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - spin_lock(&adev->gfx.kiq[0].ring_lock); + spin_lock(&kiq->ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_compute_rings)) { spin_unlock(&adev->gfx.kiq[0].ring_lock); return -ENOMEM; } - for (i = 0; i < adev->gfx.num_compute_rings; i++) + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], RESET_QUEUES, 0, 0); + } if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); + 
spin_unlock(&kiq->ring_lock); return r; } @@ -514,18 +521,18 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, return set_resource_bit; } -int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) +int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_ring *kiq_ring = &kiq->ring; uint64_t queue_mask = 0; - int r, i; + int r, i, j; if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources) return -EINVAL; for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap)) + if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap)) continue; /* This situation may be hit in the future if a new HW @@ -541,7 +548,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); - spin_lock(&adev->gfx.kiq[0].ring_lock); + spin_lock(&kiq->ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + kiq->pmf->set_resources_size); @@ -555,11 +562,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) queue_mask = ~0ULL; kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); - for (i = 0; i < adev->gfx.num_compute_rings; i++) + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); + } r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); + spin_unlock(&kiq->ring_lock); if (r) DRM_ERROR("KCQ enable failed\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index d811cb038e94..a9e41d7970ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -408,19 +408,19 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq); + struct amdgpu_irq_src *irq, int xcc_id); void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring); -void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev); +void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, - unsigned hpd_size); + unsigned hpd_size, int xcc_id); int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, - unsigned mqd_size); -void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev); -int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev); -int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev); + unsigned mqd_size, int xcc_id); +void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id); void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev); void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev); @@ -429,7 +429,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue); void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); -bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int inst, +bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id, int mec, int pipe, int queue); bool 
amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 88f8424ea1e4..accc0a7251b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4629,19 +4629,19 @@ static int gfx_v10_0_sw_init(void *handle) } if (!adev->enable_mes_kiq) { - r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE); + r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); return r; } kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); if (r) return r; } - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd)); + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd), 0); if (r) return r; @@ -4690,11 +4690,11 @@ static int gfx_v10_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev); + amdgpu_gfx_mqd_sw_fini(adev, 0); if (!adev->enable_mes_kiq) { amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev); + amdgpu_gfx_kiq_fini(adev, 0); } gfx_v10_0_pfp_fini(adev); @@ -6812,14 +6812,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v10_compute_mqd *mqd = ring->mqd_ptr; - int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; gfx_v10_0_kiq_setting(ring); if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; @@ -6841,8 +6840,8 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring) nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); } return 0; @@ -6927,7 +6926,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev) goto done; } - r = amdgpu_gfx_enable_kcq(adev); + r = amdgpu_gfx_enable_kcq(adev, 0); done: return r; } @@ -7280,7 +7279,7 @@ static int gfx_v10_0_hw_fini(void *handle) DRM_ERROR("KGQ disable failed\n"); } #endif - if (amdgpu_gfx_disable_kcq(adev)) + if (amdgpu_gfx_disable_kcq(adev, 0)) DRM_ERROR("KCQ disable failed\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 3e42a44f10a7..4fbefe236fc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1389,19 +1389,19 @@ static int gfx_v11_0_sw_init(void *handle) } if (!adev->enable_mes_kiq) { - r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE); + r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); return r; } kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); if (r) return r; } - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd)); + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0); if (r) return r; @@ -1463,11 +1463,11 @@ static int gfx_v11_0_sw_fini(void *handle) for (i = 0; i < 
adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev); + amdgpu_gfx_mqd_sw_fini(adev, 0); if (!adev->enable_mes_kiq) { amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev); + amdgpu_gfx_kiq_fini(adev, 0); } gfx_v11_0_pfp_fini(adev); @@ -4035,14 +4035,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v11_compute_mqd *mqd = ring->mqd_ptr; - int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; gfx_v11_0_kiq_setting(ring); if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; @@ -4064,8 +4063,8 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); } return 0; @@ -4153,7 +4152,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) goto done; } - r = amdgpu_gfx_enable_kcq(adev); + r = amdgpu_gfx_enable_kcq(adev, 0); done: return r; } @@ -4456,7 +4455,7 @@ static int gfx_v11_0_hw_fini(void *handle) DRM_ERROR("KGQ disable failed\n"); } #endif - if (amdgpu_gfx_disable_kcq(adev)) + if (amdgpu_gfx_disable_kcq(adev, 0)) DRM_ERROR("KCQ disable failed\n"); amdgpu_mes_kiq_hw_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 18722450e265..8a43e87de49f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2016,19 +2016,19 @@ static int gfx_v8_0_sw_init(void *handle) } } - r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE); + r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); return r; } kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); if (r) return r; /* create MQD for all compute queues as well as KIQ for SRIOV case */ - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation)); + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0); if (r) return r; @@ -2051,9 +2051,9 @@ static int gfx_v8_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev); + amdgpu_gfx_mqd_sw_fini(adev, 0); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev); + amdgpu_gfx_kiq_fini(adev, 0); gfx_v8_0_mec_fini(adev); amdgpu_gfx_rlc_fini(adev); @@ -4596,14 +4596,13 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct vi_mqd *mqd = ring->mqd_ptr; - int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; gfx_v8_0_kiq_setting(ring); if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; @@ -4626,8 +4625,8 @@ 
static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation)); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 49adc36dcc6f..62af92e5be51 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2169,19 +2169,19 @@ static int gfx_v9_0_sw_init(void *handle) } } - r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE); + r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); return r; } kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); if (r) return r; /* create MQD for all compute queues as wel as KIQ for SRIOV case */ - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation)); + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0); if (r) return r; @@ -2216,9 +2216,9 @@ static int gfx_v9_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev); + amdgpu_gfx_mqd_sw_fini(adev, 0); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev); + amdgpu_gfx_kiq_fini(adev, 0); gfx_v9_0_mec_fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, @@ -3520,7 +3520,6 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; - int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; struct v9_mqd *tmp_mqd; gfx_v9_0_kiq_setting(ring); @@ -3530,11 +3529,11 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) * driver need to re-init the mqd. 
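* (after this series the KIQ backup MQD lives in adev->gfx.kiq[0].mqd_backup instead of the tail slot of the shared mec.mqd_backup[] array, which is what the hunk below switches to)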
* check mqd->cp_hqd_pq_control since this value should not be 0 */ - tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx]; + tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup; if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control){ /* for GPU_RESET case , reset MQD to a clean status */ - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; @@ -3558,8 +3557,8 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); } return 0; @@ -3653,7 +3652,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) goto done; } - r = amdgpu_gfx_enable_kcq(adev); + r = amdgpu_gfx_enable_kcq(adev, 0); done: return r; } @@ -3772,7 +3771,7 @@ static int gfx_v9_0_hw_fini(void *handle) /* DF freeze and kcq disable will fail */ if (!amdgpu_ras_intr_triggered()) /* disable KCQ to avoid CPC touch memory not valid anymore */ - amdgpu_gfx_disable_kcq(adev); + amdgpu_gfx_disable_kcq(adev, 0); if (amdgpu_sriov_vf(adev)) { gfx_v9_0_cp_gfx_enable(adev, false); From 86301129698be52f8398f92ea8564168f6bfcae1 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 24 May 2022 10:09:39 +0800 Subject: [PATCH 051/861] drm/amdgpu: split gc v9_4_3 functionality from gc v9_0 To prepare for gc v9_4_3 specific feature. v2: fix exports (Alex) Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 2646 ++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h | 3 +- 2 files changed, 2634 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 5f8500577c02..212d48114da8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -25,16 +25,292 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" #include "soc15.h" +#include "soc15d.h" #include "soc15_common.h" #include "vega10_enum.h" +#include "clearstate_gfx9.h" +#include "v9_structs.h" + +#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h" + #include "gc/gc_9_4_3_offset.h" #include "gc/gc_9_4_3_sh_mask.h" #include "gfx_v9_4_3.h" +MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); +MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); + +#define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L +static const struct soc15_reg_golden golden_settings_gc_9_4_3[] = { + +}; + +static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev); +static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev); +static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev); +static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev); +static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, + struct amdgpu_cu_info *cu_info); + +static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring, + uint64_t queue_mask) +{ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); + amdgpu_ring_write(kiq_ring, + PACKET3_SET_RESOURCES_VMID_MASK(0) | + /* vmid_mask:0* queue_type:0 (KIQ) */ + 
PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); + amdgpu_ring_write(kiq_ring, + lower_32_bits(queue_mask)); /* queue mask lo */ + amdgpu_ring_write(kiq_ring, + upper_32_bits(queue_mask)); /* queue mask hi */ + amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ + amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, 0); /* oac mask */ + amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ +} + +static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = kiq_ring->adev; + uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(ring->queue) | + PACKET3_MAP_QUEUES_PIPE(ring->pipe) | + PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) | + /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | + /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | + PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | + /* num_queues: must be 1 */ + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); + amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); +} + +static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring, + enum amdgpu_unmap_queues_action action, + u64 gpu_addr, u64 seq) +{ + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_UNMAP_QUEUES_ACTION(action) | + PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | + PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | + PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, + PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); + + if (action == PREEMPT_QUEUES_NO_UNMAP) { + amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(kiq_ring, seq); + } else { + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + } +} + +static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring, + u64 addr, + u64 seq) +{ + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 
4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_QUERY_STATUS_CONTEXT_ID(0) | + PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | + PACKET3_QUERY_STATUS_COMMAND(2)); + /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + amdgpu_ring_write(kiq_ring, + PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | + PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); + amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); + amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); +} + +static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); + amdgpu_ring_write(kiq_ring, + PACKET3_INVALIDATE_TLBS_DST_SEL(1) | + PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | + PACKET3_INVALIDATE_TLBS_PASID(pasid) | + PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); +} + +static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = { + .kiq_set_resources = gfx_v9_4_3_kiq_set_resources, + .kiq_map_queues = gfx_v9_4_3_kiq_map_queues, + .kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues, + .kiq_query_status = gfx_v9_4_3_kiq_query_status, + .kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs, + .set_resources_size = 8, + .map_queues_size = 7, + .unmap_queues_size = 6, + .query_status_size = 7, + .invalidate_tlbs_size = 2, +}; + +static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev) +{ + adev->gfx.kiq[0].pmf = &gfx_v9_4_3_kiq_pm4_funcs; +} + +static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) +{ + +} + +static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, + bool wc, uint32_t reg, uint32_t val) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | + WRITE_DATA_DST_SEL(0) | + (wc ? 
WR_CONFIRM : 0)); + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, val); +} + +static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, + int mem_space, int opt, uint32_t addr0, + uint32_t addr1, uint32_t ref, uint32_t mask, + uint32_t inv) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, + /* memory (1) or register (0) */ + (WAIT_REG_MEM_MEM_SPACE(mem_space) | + WAIT_REG_MEM_OPERATION(opt) | /* wait */ + WAIT_REG_MEM_FUNCTION(3) | /* equal */ + WAIT_REG_MEM_ENGINE(eng_sel))); + + if (mem_space) + BUG_ON(addr0 & 0x3); /* Dword align */ + amdgpu_ring_write(ring, addr0); + amdgpu_ring_write(ring, addr1); + amdgpu_ring_write(ring, ref); + amdgpu_ring_write(ring, mask); + amdgpu_ring_write(ring, inv); /* poll interval */ +} + +static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32_SOC15(GC, 0, regSCRATCH_REG0, 0xCAFEDEAD); + r = amdgpu_ring_alloc(ring, 3); + if (r) + return r; + + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); + amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0) - + PACKET3_SET_UCONFIG_REG_START); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regSCRATCH_REG0); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + return r; +} + +static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + struct dma_fence *f = NULL; + + unsigned index; + uint64_t gpu_addr; + uint32_t tmp; + long r; + + r = amdgpu_device_wb_get(adev, &index); + if (r) + return r; + + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, 16, + AMDGPU_IB_POOL_DIRECT, &ib); + if (r) + goto err1; + + ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); + ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; + ib.ptr[2] = lower_32_bits(gpu_addr); + ib.ptr[3] = upper_32_bits(gpu_addr); + ib.ptr[4] = 0xDEADBEEF; + ib.length_dw = 5; + + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + if (r) + goto err2; + + r = dma_fence_wait_timeout(f, false, timeout); + if (r == 0) { + r = -ETIMEDOUT; + goto err2; + } else if (r < 0) { + goto err2; + } + + tmp = adev->wb.wb[index]; + if (tmp == 0xDEADBEEF) + r = 0; + else + r = -EINVAL; + +err2: + amdgpu_ib_free(adev, &ib, NULL); + dma_fence_put(f); +err1: + amdgpu_device_wb_free(adev, index); + return r; +} + + +/* This value might differ per partition */ static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -50,6 +326,241 @@ static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev) return clock; } +static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev) +{ + amdgpu_ucode_release(&adev->gfx.pfp_fw); + amdgpu_ucode_release(&adev->gfx.me_fw); + amdgpu_ucode_release(&adev->gfx.ce_fw); + amdgpu_ucode_release(&adev->gfx.rlc_fw); + amdgpu_ucode_release(&adev->gfx.mec_fw); + amdgpu_ucode_release(&adev->gfx.mec2_fw); + + kfree(adev->gfx.rlc.register_list_format); +} + +static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev, + const char *chip_name) +{ + char fw_name[30]; + int err; + const struct rlc_firmware_header_v2_0 *rlc_hdr; + uint16_t version_major; + 
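/* the header's major/minor revision, read below, selects which rlc_firmware_header_v2_x layout amdgpu_gfx_rlc_init_microcode() parses */ +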
uint16_t version_minor; + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); + + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); + if (err) + goto out; + rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + + version_major = le16_to_cpu(rlc_hdr->header.header_version_major); + version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); + err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); +out: + if (err) + amdgpu_ucode_release(&adev->gfx.rlc_fw); + + return err; +} + +static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev) +{ + return true; +} + +static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev) +{ + if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; +} + +static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev, + const char *chip_name) +{ + char fw_name[30]; + int err; + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); + + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); + if (err) + goto out; + amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); + amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT); + + adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version; + adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version; + + gfx_v9_4_3_check_if_need_gfxoff(adev); + +out: + if (err) + amdgpu_ucode_release(&adev->gfx.mec_fw); + return err; +} + +static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + int r; + + chip_name = "gc_9_4_3"; + + r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name); + if (r) + return r; + + r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name); + if (r) + return r; + + return r; +} + +static u32 gfx_v9_4_3_get_csb_size(struct amdgpu_device *adev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = gfx9_cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } + } + + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +static void gfx_v9_4_3_get_csb_buffer(struct amdgpu_device *adev, + volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (adev->gfx.rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + buffer[count++] = cpu_to_le32(0x80000000); + buffer[count++] = cpu_to_le32(0x80000000); + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = + cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + buffer[count++] = cpu_to_le32(ext->reg_index - + PACKET3_SET_CONTEXT_REG_START); + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = cpu_to_le32(ext->extent[i]); + } else { + return; + } + } + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + + 
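/* trailing CLEAR_STATE packet; its two dwords are already counted by gfx_v9_4_3_get_csb_size() above */ +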
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); + buffer[count++] = cpu_to_le32(0); +} + +static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); +} + +static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) +{ + int r, i; + u32 *hpd; + const __le32 *fw_data; + unsigned fw_size; + u32 *fw; + size_t mec_hpd_size; + + const struct gfx_firmware_header_v1_0 *mec_hdr; + + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + + /* take ownership of the relevant compute queues */ + amdgpu_gfx_compute_queue_acquire(adev); + mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; + if (mec_hpd_size) { + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); + if (r) { + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); + gfx_v9_4_3_mec_fini(adev); + return r; + } + + if (amdgpu_emu_mode == 1) { + for (i = 0; i < mec_hpd_size / 4; i++) { + memset((void *)(hpd + i), 0, 4); + if (i % 50 == 0) + msleep(1); + } + } else { + memset(hpd, 0, mec_hpd_size); + } + + amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + } + + mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); + + r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.mec_fw_obj, + &adev->gfx.mec.mec_fw_gpu_addr, + (void **)&fw); + if (r) { + dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); + gfx_v9_4_3_mec_fini(adev); + return r; + } + + memcpy(fw, fw_data, fw_size); + + amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); + amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); + + return 0; +} + static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, @@ -150,6 +661,400 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, soc15_grbm_select(adev, me, pipe, q, vm); } +static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, + .select_se_sh = &gfx_v9_4_3_select_se_sh, + .read_wave_data = &gfx_v9_4_3_read_wave_data, + .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, + .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, + .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, +}; + +static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) +{ + u32 gb_addr_config; + + adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 4, 3): + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); + break; + default: + BUG(); + break; + } + + adev->gfx.config.gb_addr_config = gb_addr_config; + + adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << + REG_GET_FIELD( + adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, + NUM_PIPES); + + adev->gfx.config.max_tile_pipes = + adev->gfx.config.gb_addr_config_fields.num_pipes; + + 
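/* the remaining GB_ADDR_CONFIG fields are log2-encoded by hardware, hence the 1 << REG_GET_FIELD() decode below */ +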
adev->gfx.config.gb_addr_config_fields.num_banks = 1 << + REG_GET_FIELD( + adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, + NUM_BANKS); + adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << + REG_GET_FIELD( + adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, + MAX_COMPRESSED_FRAGS); + adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << + REG_GET_FIELD( + adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, + NUM_RB_PER_SE); + adev->gfx.config.gb_addr_config_fields.num_se = 1 << + REG_GET_FIELD( + adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, + NUM_SHADER_ENGINES); + adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + + REG_GET_FIELD( + adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, + PIPE_INTERLEAVE_SIZE)); + + return 0; +} + +static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, + int mec, int pipe, int queue) +{ + unsigned irq_type; + struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; + unsigned int hw_prio; + + ring = &adev->gfx.compute_ring[ring_id]; + + /* mec0 is me1 */ + ring->me = mec + 1; + ring->pipe = pipe; + ring->queue = queue; + + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; + ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + + (ring_id * GFX9_MEC_HPD_SIZE); + ring->vm_hub = AMDGPU_GFXHUB_0; + sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); + + irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + + ring->pipe; + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? + AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; + /* type-2 packets are deprecated on MEC, use type-3 instead */ + return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, + hw_prio, NULL); +} + +static int gfx_v9_4_3_sw_init(void *handle) +{ + int i, j, k, r, ring_id; + struct amdgpu_kiq *kiq; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_pipe_per_mec = 4; + adev->gfx.mec.num_queue_per_pipe = 8; + + /* EOP Event */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); + if (r) + return r; + + /* Privileged reg */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT, + &adev->gfx.priv_reg_irq); + if (r) + return r; + + /* Privileged inst */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT, + &adev->gfx.priv_inst_irq); + if (r) + return r; + + adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; + + r = adev->gfx.rlc.funcs->init(adev); + if (r) { + DRM_ERROR("Failed to init rlc BOs!\n"); + return r; + } + + r = gfx_v9_4_3_mec_init(adev); + if (r) { + DRM_ERROR("Failed to init MEC BOs!\n"); + return r; + } + + /* set up the compute queues - allocate horizontally across pipes */ + ring_id = 0; + for (i = 0; i < adev->gfx.mec.num_mec; ++i) { + for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { + for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) + continue; + + r = gfx_v9_4_3_compute_ring_init(adev, + ring_id, + i, k, j); + if (r) + return r; + + ring_id++; + } + } + } + + r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); + if (r) { + DRM_ERROR("Failed to init KIQ BOs!\n"); + return r; + } + + kiq = &adev->gfx.kiq[0]; + r = amdgpu_gfx_kiq_init_ring(adev, 
&kiq->ring, &kiq->irq, 0); + if (r) + return r; + + /* create MQD for all compute queues as well as KIQ for SRIOV case */ + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0); + if (r) + return r; + + r = gfx_v9_4_3_gpu_early_init(adev); + if (r) + return r; + + return 0; +} + +static int gfx_v9_4_3_sw_fini(void *handle) +{ + int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) + amdgpu_ring_fini(&adev->gfx.compute_ring[i]); + + amdgpu_gfx_mqd_sw_fini(adev, 0); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); + amdgpu_gfx_kiq_fini(adev, 0); + + gfx_v9_4_3_mec_fini(adev); + amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); + gfx_v9_4_3_free_microcode(adev); + + return 0; +} + +static u32 gfx_v9_4_3_get_rb_active_bitmap(struct amdgpu_device *adev) +{ + u32 data, mask; + + data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); + data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); + + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; + data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + + mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se); + + return (~data) & mask; +} + +static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev) +{ + int i, j; + u32 data; + u32 active_rbs = 0; + u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + data = gfx_v9_4_3_get_rb_active_bitmap(adev); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + rb_bitmap_width_per_sh); + } + } + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + adev->gfx.config.backend_enable_mask = active_rbs; + adev->gfx.config.num_rbs = hweight32(active_rbs); +} + +#define DEFAULT_SH_MEM_BASES (0x6000) +static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev) +{ + int i; + uint32_t sh_mem_config; + uint32_t sh_mem_bases; + + /* + * Configure apertures: + * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) + * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) + * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) + */ + sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); + + sh_mem_config = SH_MEM_ADDRESS_MODE_64 | + SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; + + mutex_lock(&adev->srbm_mutex); + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { + soc15_grbm_select(adev, 0, 0, 0, i); + /* CP and shaders */ + WREG32_SOC15_RLC(GC, 0, regSH_MEM_CONFIG, sh_mem_config); + WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, sh_mem_bases); + } + soc15_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + access. These should be enabled by FW for target VMIDs. 
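+ The loop below zeroes each such VMID's GDS base/size along with its GWS and OA masks.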
*/ + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0); + } +} + +static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev) +{ + int vmid; + + /* + * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA + * access. Compute VMIDs should be enabled by FW for target VMIDs, + * the driver can enable them for graphics. VMID0 should maintain + * access so that HWS firmware can save/restore entries. + */ + for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { + WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0); + } +} + +static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) +{ + u32 tmp; + int i; + + WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); + + gfx_v9_4_3_setup_rb(adev); + gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info); + adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, regDB_DEBUG2); + + /* XXX SH_MEM regs */ + /* where to put LDS, scratch, GPUVM in FSA64 space */ + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + soc15_grbm_select(adev, 0, 0, 0, i); + /* CP and shaders */ + if (i == 0) { + tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, + SH_MEM_ALIGNMENT_MODE_UNALIGNED); + tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, + !!adev->gmc.noretry); + WREG32_SOC15_RLC(GC, 0, regSH_MEM_CONFIG, tmp); + WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, 0); + } else { + tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, + SH_MEM_ALIGNMENT_MODE_UNALIGNED); + tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, + !!adev->gmc.noretry); + WREG32_SOC15_RLC(GC, 0, regSH_MEM_CONFIG, tmp); + tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, + (adev->gmc.private_aperture_start >> 48)); + tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, + (adev->gmc.shared_aperture_start >> 48)); + WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, tmp); + } + } + soc15_grbm_select(adev, 0, 0, 0, 0); + + mutex_unlock(&adev->srbm_mutex); + + gfx_v9_4_3_init_compute_vmid(adev); + gfx_v9_4_3_init_gds_vmid(adev); +} + +static void gfx_v9_4_3_enable_save_restore_machine(struct amdgpu_device *adev) +{ + WREG32_FIELD15_PREREG(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1); +} + +static void gfx_v9_4_3_init_csb(struct amdgpu_device *adev) +{ + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); + /* csib */ + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regRLC_CSIB_ADDR_HI), + adev->gfx.rlc.clear_state_gpu_addr >> 32); + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regRLC_CSIB_ADDR_LO), + adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regRLC_CSIB_LENGTH), + adev->gfx.rlc.clear_state_size); +} + +static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev) +{ + gfx_v9_4_3_init_csb(adev); + + /* + * Rlc save restore list is workable since v2_1. + * And it's needed by gfxoff feature. 
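+ * gfx_v9_4_3_enable_save_restore_machine() below just sets RLC_SRM_CNTL.SRM_ENABLE, and is skipped when no v2_1 list exists.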
+ */ + if (adev->gfx.rlc.is_rlc_v2_1) + gfx_v9_4_3_enable_save_restore_machine(adev); + + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { + WREG32_SOC15(GC, 0, regRLC_JUMP_TABLE_RESTORE, + adev->gfx.rlc.cp_table_gpu_addr >> 8); + } +} + +void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); + data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK; + WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); +} + static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) { uint32_t rlc_setting; @@ -189,6 +1094,20 @@ static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev) static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) { + const struct cs_section_def *cs_data; + int r; + + adev->gfx.rlc.cs_data = gfx9_cs_data; + + cs_data = adev->gfx.rlc.cs_data; + + if (cs_data) { + /* init clear state block */ + r = amdgpu_gfx_rlc_init_csb(adev); + if (r) + return r; + } + /* init spm vmid with 0xf */ if (adev->gfx.rlc.funcs->update_spm_vmid) adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); @@ -246,8 +1165,6 @@ static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0); - if (adev->gfx.num_gfx_rings) - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0); WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); } @@ -339,8 +1256,7 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); - /* TODO: revisit pg function */ - /* gfx_v9_4_3_init_pg(adev);*/ + gfx_v9_4_3_init_pg(adev); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { /* legacy rlc firmware loading */ @@ -407,20 +1323,899 @@ static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offs ARRAY_SIZE(rlcg_access_gc_9_4_3)); } -const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { - .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, - .select_se_sh = &gfx_v9_4_3_select_se_sh, - .read_wave_data = &gfx_v9_4_3_read_wave_data, - .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, - .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, - .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, -}; +static void gfx_v9_4_3_cp_compute_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) { + WREG32_SOC15_RLC(GC, 0, regCP_MEC_CNTL, 0); + } else { + WREG32_SOC15_RLC(GC, 0, regCP_MEC_CNTL, + (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); + adev->gfx.kiq[0].ring.sched.ready = false; + } + udelay(50); +} -const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { +static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev) +{ + const struct gfx_firmware_header_v1_0 *mec_hdr; + const __le32 *fw_data; + unsigned i; + u32 tmp; + u32 mec_ucode_addr_offset; + u32 mec_ucode_data_offset; + + if (!adev->gfx.mec_fw) + return -EINVAL; + + gfx_v9_4_3_cp_compute_enable(adev, false); + + mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); + + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); + tmp = 0; + tmp = 
REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); + + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, + adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, + upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); + + mec_ucode_addr_offset = + SOC15_REG_OFFSET(GC, 0, regCP_MEC_ME1_UCODE_ADDR); + mec_ucode_data_offset = + SOC15_REG_OFFSET(GC, 0, regCP_MEC_ME1_UCODE_DATA); + + /* MEC1 */ + WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset); + for (i = 0; i < mec_hdr->jt_size; i++) + WREG32(mec_ucode_data_offset, + le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); + + WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version); + /* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */ + + return 0; +} + +/* KIQ functions */ +static void gfx_v9_4_3_kiq_setting(struct amdgpu_ring *ring) +{ + uint32_t tmp; + struct amdgpu_device *adev = ring->adev; + + /* tell RLC which is KIQ queue */ + tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); + tmp &= 0xffffff00; + tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); + WREG32_SOC15_RLC(GC, 0, regRLC_CP_SCHEDULERS, tmp); + tmp |= 0x80; + WREG32_SOC15_RLC(GC, 0, regRLC_CP_SCHEDULERS, tmp); +} + +static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } + } +} + +static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct v9_mqd *mqd = ring->mqd_ptr; + uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; + uint32_t tmp; + + mqd->header = 0xC0310800; + mqd->compute_pipelinestat_enable = 0x00000001; + mqd->compute_static_thread_mgmt_se0 = 0xffffffff; + mqd->compute_static_thread_mgmt_se1 = 0xffffffff; + mqd->compute_static_thread_mgmt_se2 = 0xffffffff; + mqd->compute_static_thread_mgmt_se3 = 0xffffffff; + mqd->compute_misc_reserved = 0x00000003; + + mqd->dynamic_cu_mask_addr_lo = + lower_32_bits(ring->mqd_gpu_addr + + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); + mqd->dynamic_cu_mask_addr_hi = + upper_32_bits(ring->mqd_gpu_addr + + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); + + eop_base_addr = ring->eop_gpu_addr >> 8; + mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; + mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, + (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); + + mqd->cp_hqd_eop_control = tmp; + + /* enable doorbell? 
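(the doorbell fields are only staged in the MQD here; gfx_v9_4_3_kiq_init_register() writes them to the HQD registers) 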
*/
+	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+
+	if (ring->use_doorbell) {
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+				    DOORBELL_OFFSET, ring->doorbell_index);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+				    DOORBELL_EN, 1);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+				    DOORBELL_SOURCE, 0);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+				    DOORBELL_HIT, 0);
+	} else {
+		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+				    DOORBELL_EN, 0);
+	}
+
+	mqd->cp_hqd_pq_doorbell_control = tmp;
+
+	/* disable the queue if it's active */
+	ring->wptr = 0;
+	mqd->cp_hqd_dequeue_request = 0;
+	mqd->cp_hqd_pq_rptr = 0;
+	mqd->cp_hqd_pq_wptr_lo = 0;
+	mqd->cp_hqd_pq_wptr_hi = 0;
+
+	/* set the pointer to the MQD */
+	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
+	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
+
+	/* set MQD vmid to 0 */
+	tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
+	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+	mqd->cp_mqd_control = tmp;
+
+	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
+	hqd_gpu_addr = ring->gpu_addr >> 8;
+	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+
+	/* set up the HQD, this is similar to CP_RB0_CNTL */
+	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+			    (order_base_2(ring->ring_size / 4) - 1));
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+#ifdef __BIG_ENDIAN
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+#endif
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+	mqd->cp_hqd_pq_control = tmp;
+
+	/* set the wb address whether it's enabled or not */
+	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+	mqd->cp_hqd_pq_rptr_report_addr_hi =
+		upper_32_bits(wb_gpu_addr) & 0xffff;
+
+	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
+	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+
+	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+	ring->wptr = 0;
+	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
+
+	/* set the vmid for the queue */
+	mqd->cp_hqd_vmid = 0;
+
+	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
+	mqd->cp_hqd_persistent_state = tmp;
+
+	/* set MIN_IB_AVAIL_SIZE */
+	tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
+	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
+	mqd->cp_hqd_ib_control = tmp;
+
+	/* set static priority for a queue/ring */
+	gfx_v9_4_3_mqd_set_priority(ring, mqd);
+	mqd->cp_hqd_quantum = RREG32(regCP_HQD_QUANTUM);
+
+	/* map_queues packet doesn't need to activate the queue,
+	 * so only the kiq needs to set this field.
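+	 * (Compute queues are activated later, when the KIQ processes their
+	 * MAP_QUEUES packet, so cp_hqd_active is left at 0 for them here.)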
+ */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + mqd->cp_hqd_active = 1; + + return 0; +} + +static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct v9_mqd *mqd = ring->mqd_ptr; + int j; + + /* disable wptr polling */ + WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); + + WREG32_SOC15_RLC(GC, 0, regCP_HQD_EOP_BASE_ADDR, + mqd->cp_hqd_eop_base_addr_lo); + WREG32_SOC15_RLC(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, + mqd->cp_hqd_eop_base_addr_hi); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_EOP_CONTROL, + mqd->cp_hqd_eop_control); + + /* enable doorbell? */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, + mqd->cp_hqd_pq_doorbell_control); + + /* disable the queue if it's active */ + if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { + WREG32_SOC15_RLC(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); + for (j = 0; j < adev->usec_timeout; j++) { + if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + WREG32_SOC15_RLC(GC, 0, regCP_HQD_DEQUEUE_REQUEST, + mqd->cp_hqd_dequeue_request); + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_RPTR, + mqd->cp_hqd_pq_rptr); + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_LO, + mqd->cp_hqd_pq_wptr_lo); + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_HI, + mqd->cp_hqd_pq_wptr_hi); + } + + /* set the pointer to the MQD */ + WREG32_SOC15_RLC(GC, 0, regCP_MQD_BASE_ADDR, + mqd->cp_mqd_base_addr_lo); + WREG32_SOC15_RLC(GC, 0, regCP_MQD_BASE_ADDR_HI, + mqd->cp_mqd_base_addr_hi); + + /* set MQD vmid to 0 */ + WREG32_SOC15_RLC(GC, 0, regCP_MQD_CONTROL, + mqd->cp_mqd_control); + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_BASE, + mqd->cp_hqd_pq_base_lo); + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_BASE_HI, + mqd->cp_hqd_pq_base_hi); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_CONTROL, + mqd->cp_hqd_pq_control); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, + mqd->cp_hqd_pq_rptr_report_addr_lo); + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, + mqd->cp_hqd_pq_rptr_report_addr_hi); + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, + mqd->cp_hqd_pq_wptr_poll_addr_lo); + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, + mqd->cp_hqd_pq_wptr_poll_addr_hi); + + /* enable the doorbell if requested */ + if (ring->use_doorbell) { + WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, + (adev->doorbell_index.kiq * 2) << 2); + WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, + (adev->doorbell_index.userqueue_end * 2) << 2); + } + + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, + mqd->cp_hqd_pq_doorbell_control); + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_LO, + mqd->cp_hqd_pq_wptr_lo); + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_HI, + mqd->cp_hqd_pq_wptr_hi); + + /* set the vmid for the queue */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); + + WREG32_SOC15_RLC(GC, 0, regCP_HQD_PERSISTENT_STATE, + mqd->cp_hqd_persistent_state); + + /* activate the queue */ + WREG32_SOC15_RLC(GC, 0, regCP_HQD_ACTIVE, + mqd->cp_hqd_active); + + if (ring->use_doorbell) + WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); + + return 0; +} + +static int 
gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int j;
+
+	/* disable the queue if it's active */
+	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
+
+		WREG32_SOC15_RLC(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
+
+		for (j = 0; j < adev->usec_timeout; j++) {
+			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+				break;
+			udelay(1);
+		}
+
+		if (j == adev->usec_timeout) {
+			DRM_DEBUG("KIQ dequeue request failed.\n");
+
+			/* Manual disable if dequeue request times out */
+			WREG32_SOC15_RLC(GC, 0, regCP_HQD_ACTIVE, 0);
+		}
+
+		WREG32_SOC15_RLC(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0);
+	}
+
+	WREG32_SOC15_RLC(GC, 0, regCP_HQD_IQ_TIMER, 0);
+	WREG32_SOC15_RLC(GC, 0, regCP_HQD_IB_CONTROL, 0);
+	WREG32_SOC15_RLC(GC, 0, regCP_HQD_PERSISTENT_STATE, 0);
+	WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
+	WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);
+	WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_RPTR, 0);
+	WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
+	WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
+
+	return 0;
+}
+
+static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct v9_mqd *mqd = ring->mqd_ptr;
+	struct v9_mqd *tmp_mqd;
+
+	gfx_v9_4_3_kiq_setting(ring);
+
+	/* The GPU could be in a bad state during probe: the driver triggers
+	 * the reset after loading the SMU, and in that case the mqd has not
+	 * been initialized, so the driver needs to re-init it.
+	 * Check mqd->cp_hqd_pq_control, since this value should not be 0.
+	 */
+	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
+	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
+		/* for the GPU_RESET case, reset the MQD to a clean state */
+		if (adev->gfx.kiq[0].mqd_backup)
+			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));
+
+		/* reset ring buffer */
+		ring->wptr = 0;
+		amdgpu_ring_clear_ring(ring);
+
+		mutex_lock(&adev->srbm_mutex);
+		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+		gfx_v9_4_3_kiq_init_register(ring);
+		soc15_grbm_select(adev, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
+	} else {
+		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+		mutex_lock(&adev->srbm_mutex);
+		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+		gfx_v9_4_3_mqd_init(ring);
+		gfx_v9_4_3_kiq_init_register(ring);
+		soc15_grbm_select(adev, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
+
+		if (adev->gfx.kiq[0].mqd_backup)
+			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
+	}
+
+	return 0;
+}
+
+static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct v9_mqd *mqd = ring->mqd_ptr;
+	int mqd_idx = ring - &adev->gfx.compute_ring[0];
+	struct v9_mqd *tmp_mqd;
+
+	/* Same as the kiq init above: the driver needs to re-init the mqd if
+	 * mqd->cp_hqd_pq_control was not initialized before.
+	 */
+	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+
+	if (!tmp_mqd->cp_hqd_pq_control ||
+	    (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+		mutex_lock(&adev->srbm_mutex);
+		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+		gfx_v9_4_3_mqd_init(ring);
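+		/* The MQD above was programmed while this ring's me/pipe/queue
+		 * was selected in GRBM; the selection is dropped back to 0
+		 * below before srbm_mutex is released and the backup copy is
+		 * taken.
+		 */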
+ soc15_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + if (adev->gfx.mec.mqd_backup[mqd_idx]) + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); + } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ + /* reset MQD to a clean status */ + if (adev->gfx.mec.mqd_backup[mqd_idx]) + memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); + + /* reset ring buffer */ + ring->wptr = 0; + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); + amdgpu_ring_clear_ring(ring); + } else { + amdgpu_ring_clear_ring(ring); + } + + return 0; +} + +static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + ring = &adev->gfx.kiq[0].ring; + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (unlikely(r != 0)) + return r; + + gfx_v9_4_3_kiq_init_queue(ring); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + amdgpu_bo_unreserve(ring->mqd_obj); + ring->sched.ready = true; + return 0; +} + +static int gfx_v9_4_3_kcq_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = NULL; + int r = 0, i; + + gfx_v9_4_3_cp_compute_enable(adev, true); + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) + goto done; + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v9_4_3_kcq_init_queue(ring); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) + goto done; + } + + r = amdgpu_gfx_enable_kcq(adev, 0); +done: + return r; +} + +static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) +{ + int r, i; + struct amdgpu_ring *ring; + + gfx_v9_4_3_enable_gui_idle_interrupt(adev, false); + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + gfx_v9_4_3_disable_gpa_mode(adev); + + r = gfx_v9_4_3_cp_compute_load_microcode(adev); + if (r) + return r; + } + + r = gfx_v9_4_3_kiq_resume(adev); + if (r) + return r; + + r = gfx_v9_4_3_kcq_resume(adev); + if (r) + return r; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + amdgpu_ring_test_helper(ring); + } + + gfx_v9_4_3_enable_gui_idle_interrupt(adev, true); + + return 0; +} + +static void gfx_v9_4_3_cp_enable(struct amdgpu_device *adev, bool enable) +{ + gfx_v9_4_3_cp_compute_enable(adev, enable); +} + +static int gfx_v9_4_3_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + gfx_v9_4_3_init_golden_registers(adev); + + gfx_v9_4_3_constants_init(adev); + + r = adev->gfx.rlc.funcs->resume(adev); + if (r) + return r; + + r = gfx_v9_4_3_cp_resume(adev); + if (r) + return r; + + return r; +} + +static int gfx_v9_4_3_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + + if (amdgpu_gfx_disable_kcq(adev, 0)) + DRM_ERROR("KCQ disable failed\n"); + + /* Use deinitialize sequence from CAIL when unbinding device from driver, + * otherwise KIQ is hanging when binding back + */ + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me, + adev->gfx.kiq[0].ring.pipe, + adev->gfx.kiq[0].ring.queue, 0); + 
gfx_v9_4_3_kiq_fini_register(&adev->gfx.kiq[0].ring); + soc15_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } + + gfx_v9_4_3_cp_enable(adev, false); + + /* Skip suspend with A+A reset */ + if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { + dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n"); + return 0; + } + + adev->gfx.rlc.funcs->stop(adev); + return 0; +} + +static int gfx_v9_4_3_suspend(void *handle) +{ + return gfx_v9_4_3_hw_fini(handle); +} + +static int gfx_v9_4_3_resume(void *handle) +{ + return gfx_v9_4_3_hw_init(handle); +} + +static bool gfx_v9_4_3_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), + GRBM_STATUS, GUI_ACTIVE)) + return false; + else + return true; +} + +static int gfx_v9_4_3_wait_for_idle(void *handle) +{ + unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + if (gfx_v9_4_3_is_idle(handle)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static int gfx_v9_4_3_soft_reset(void *handle) +{ + u32 grbm_soft_reset = 0; + u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* GRBM_STATUS */ + tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS); + if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | + GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | + GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | + GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | + GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_CP, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); + } + + if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_CP, 1); + } + + /* GRBM_STATUS2 */ + tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS2); + if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + + + if (grbm_soft_reset) { + /* stop the rlc */ + adev->gfx.rlc.funcs->stop(adev); + + /* Disable MEC parsing/prefetching */ + gfx_v9_4_3_cp_compute_enable(adev, false); + + if (grbm_soft_reset) { + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + } + + /* Wait a little for things to settle down */ + udelay(50); + } + return 0; +} + +static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, + uint32_t vmid, + uint32_t gds_base, uint32_t gds_size, + uint32_t gws_base, uint32_t gws_size, + uint32_t oa_base, uint32_t oa_size) +{ + struct amdgpu_device *adev = ring->adev; + + /* GDS Base */ + gfx_v9_4_3_write_data_to_reg(ring, 0, false, + SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, + gds_base); + + /* GDS Size */ + gfx_v9_4_3_write_data_to_reg(ring, 0, false, + SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, + gds_size); + + /* GWS */ + gfx_v9_4_3_write_data_to_reg(ring, 0, false, + SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, + 
gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); + + /* OA */ + gfx_v9_4_3_write_data_to_reg(ring, 0, false, + SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, + (1 << (oa_size + oa_base)) - (1 << oa_base)); +} + +static int gfx_v9_4_3_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + AMDGPU_MAX_COMPUTE_RINGS); + gfx_v9_4_3_set_kiq_pm4_funcs(adev); + gfx_v9_4_3_set_ring_funcs(adev); + gfx_v9_4_3_set_irq_funcs(adev); + gfx_v9_4_3_set_gds_init(adev); + gfx_v9_4_3_set_rlc_funcs(adev); + + return gfx_v9_4_3_init_microcode(adev); +} + +static int gfx_v9_4_3_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); + if (r) + return r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); + if (r) + return r; + + return 0; +} + +static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def; + + amdgpu_gfx_rlc_enter_safe_mode(adev); + + /* It is disabled by HW by default */ + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { + /* 1 - RLC_CGTT_MGCG_OVERRIDE */ + def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + + data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); + + /* only for Vega10 & Raven1 */ + data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; + + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); + + /* MGLS is a global flag to control all MGLS in GFX */ + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { + /* 2 - RLC memory Light sleep */ + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { + def = data = RREG32_SOC15(GC, 0, regRLC_MEM_SLP_CNTL); + data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; + if (def != data) + WREG32_SOC15(GC, 0, regRLC_MEM_SLP_CNTL, data); + } + /* 3 - CP memory Light sleep */ + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { + def = data = RREG32_SOC15(GC, 0, regCP_MEM_SLP_CNTL); + data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + if (def != data) + WREG32_SOC15(GC, 0, regCP_MEM_SLP_CNTL, data); + } + } + } else { + /* 1 - MGCG_OVERRIDE */ + def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + + data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); + + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); + + /* 2 - disable MGLS in RLC */ + data = RREG32_SOC15(GC, 0, regRLC_MEM_SLP_CNTL); + if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { + data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; + WREG32_SOC15(GC, 0, regRLC_MEM_SLP_CNTL, data); + } + + /* 3 - disable MGLS in CP */ + data = RREG32_SOC15(GC, 0, regCP_MEM_SLP_CNTL); + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { + data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + WREG32_SOC15(GC, 0, regCP_MEM_SLP_CNTL, data); + } + } + + amdgpu_gfx_rlc_exit_safe_mode(adev); +} + +static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + amdgpu_gfx_rlc_enter_safe_mode(adev); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { + def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + /* unset CGCG 
override */ + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; + else + data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; + /* update CGCG and CGLS override bits */ + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); + + /* enable cgcg FSM(0x0000363F) */ + def = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); + + if (adev->asic_type == CHIP_ARCTURUS) + data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | + RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + else + data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | + RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) + data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | + RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); + + /* set IDLE_POLL_COUNT(0x00900100) */ + def = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); + data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | + (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); + if (def != data) + WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); + } else { + def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); + /* reset CGCG/CGLS bits */ + data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); + /* disable cgcg and cgls in FSM */ + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); + } + + amdgpu_gfx_rlc_exit_safe_mode(adev); +} + +static int gfx_v9_4_3_update_gfx_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + if (enable) { + /* CGCG/CGLS should be enabled after MGCG/MGLS + * === MGCG + MGLS === + */ + gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable); + /* === CGCG + CGLS === */ + gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable); + } else { + /* CGCG/CGLS should be disabled before MGCG/MGLS + * === CGCG + CGLS === + */ + gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable); + /* === MGCG + MGLS === */ + gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable); + } + return 0; +} + +static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled, .set_safe_mode = gfx_v9_4_3_set_safe_mode, .unset_safe_mode = gfx_v9_4_3_unset_safe_mode, .init = gfx_v9_4_3_rlc_init, + .get_csb_size = gfx_v9_4_3_get_csb_size, + .get_csb_buffer = gfx_v9_4_3_get_csb_buffer, .resume = gfx_v9_4_3_rlc_resume, .stop = gfx_v9_4_3_rlc_stop, .reset = gfx_v9_4_3_rlc_reset, @@ -428,3 +2223,828 @@ const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { .update_spm_vmid = gfx_v9_4_3_update_spm_vmid, .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range, }; + +static int gfx_v9_4_3_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static int gfx_v9_4_3_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + return 0; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 4, 3): + gfx_v9_4_3_update_gfx_clock_gating(adev, + state == AMD_CG_STATE_GATE); + break; + default: + break; + } + return 0; +} + +static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + if (amdgpu_sriov_vf(adev)) + *flags = 0; + + /* AMD_CG_SUPPORT_GFX_MGCG */ + data = 
RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_CGTT_MGCG_OVERRIDE)); + if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_MGCG; + + /* AMD_CG_SUPPORT_GFX_CGCG */ + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_CGCG_CGLS_CTRL)); + if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CGCG; + + /* AMD_CG_SUPPORT_GFX_CGLS */ + if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CGLS; + + /* AMD_CG_SUPPORT_GFX_RLC_LS */ + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_MEM_SLP_CNTL)); + if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; + + /* AMD_CG_SUPPORT_GFX_CP_LS */ + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regCP_MEM_SLP_CNTL)); + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; +} + +static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 ref_and_mask, reg_mem_engine; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + switch (ring->me) { + case 1: + ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; + break; + case 2: + ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; + break; + default: + return; + } + reg_mem_engine = 0; + } else { + ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; + reg_mem_engine = 1; /* pfp */ + } + + gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1, + adev->nbio.funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), + ref_and_mask, ref_and_mask, 0x20); +} + +static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); + + /* Currently, there is a high possibility to get wave ID mismatch + * between ME and GDS, leading to a hw deadlock, because ME generates + * different wave IDs than the GDS expects. This situation happens + * randomly when at least 5 compute pipes use GDS ordered append. + * The wave IDs generated by ME are also wrong after suspend/resume. + * Those are probably bugs somewhere else in the kernel driver. + * + * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and + * GDS to 0 for this ring (me/pipe). + */ + if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); + amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); + BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + lower_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, control); +} + +static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned flags) +{ + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; + bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; + bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; + + /* RELEASE_MEM - flush caches, send int */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); + amdgpu_ring_write(ring, ((writeback ? 
(EOP_TC_WB_ACTION_EN | + EOP_TC_NC_ACTION_EN) : + (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | + EOP_TC_MD_ACTION_EN)) | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); + + /* + * the address should be Qword aligned if 64bit write, Dword + * aligned if only send 32bit data low (discard data high) + */ + if (write64bit) + BUG_ON(addr & 0x7); + else + BUG_ON(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + amdgpu_ring_write(ring, 0); +} + +static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0, + lower_32_bits(addr), upper_32_bits(addr), + seq, 0xffffffff, 4); +} + +static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); +} + +static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring) +{ + return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ +} + +static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring) +{ + u64 wptr; + + /* XXX check if swapping is necessary on BE */ + if (ring->use_doorbell) + wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); + else + BUG(); + return wptr; +} + +static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + /* XXX check if swapping is necessary on BE */ + if (ring->use_doorbell) { + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr); + WDOORBELL64(ring->doorbell_index, ring->wptr); + } else { + BUG(); /* only DOORBELL method supported on gfx9 now */ + } +} + +static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned int flags) +{ + struct amdgpu_device *adev = ring->adev; + + /* we only allocate 32bit for each seq wb address */ + BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + /* write fence seq to the "addr" */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + if (flags & AMDGPU_FENCE_FLAG_INT) { + /* set register to trigger INT */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); + amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ + } +} + +static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); + amdgpu_ring_write(ring, 0 | /* src: register*/ + (5 << 8) | /* dst: memory */ + (1 << 20)); /* write confirm */ + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + + reg_val_offs * 4)); + amdgpu_ring_write(ring, 
upper_32_bits(adev->wb.gpu_addr + + reg_val_offs * 4)); +} + +static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val) +{ + uint32_t cmd = 0; + + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_GFX: + cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; + break; + case AMDGPU_RING_TYPE_KIQ: + cmd = (1 << 16); /* no inc addr */ + break; + default: + cmd = WR_CONFIRM; + break; + } + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, cmd); + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, val); +} + +static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); +} + +static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, + ref, mask); +} + +static void gfx_v9_4_3_set_compute_eop_interrupt_state(struct amdgpu_device *adev, + int me, int pipe, + enum amdgpu_interrupt_state state) +{ + u32 mec_int_cntl, mec_int_cntl_reg; + + /* + * amdgpu controls only the first MEC. That's why this function only + * handles the setting of interrupts for this specific MEC. All other + * pipes' interrupts are set by amdkfd. + */ + + if (me == 1) { + switch (pipe) { + case 0: + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); + break; + case 1: + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); + break; + case 2: + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); + break; + case 3: + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + } else { + DRM_DEBUG("invalid me %d\n", me); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, + TIME_STAMP_INT_ENABLE, 0); + WREG32(mec_int_cntl_reg, mec_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, + TIME_STAMP_INT_ENABLE, 1); + WREG32(mec_int_cntl_reg, mec_int_cntl); + break; + default: + break; + } +} + +static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + break; + default: + break; + } + + return 0; +} + +static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, + PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); + break; + default: + break; + } + + return 0; +} + +static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 1, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 2, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 3, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 0, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 1, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 2, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 3, state); + break; + default: + break; + } + return 0; +} + +static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int i; + u8 me_id, pipe_id, queue_id; + struct amdgpu_ring *ring; + + DRM_DEBUG("IH: CP EOP\n"); + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + queue_id = (entry->ring_id & 0x70) >> 4; + + switch (me_id) { + case 0: + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + /* Per-queue interrupt is supported for MEC starting from VI. + * The interrupt can only be enabled/disabled per pipe instead of per queue. 
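+			 * The loop below therefore matches me/pipe/queue against
+			 * every compute ring and signals the ring this EOP
+			 * entry belongs to.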
+			 */
+			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
+				amdgpu_fence_process(ring);
+		}
+		break;
+	}
+	return 0;
+}
+
+static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
+			     struct amdgpu_iv_entry *entry)
+{
+	u8 me_id, pipe_id, queue_id;
+	struct amdgpu_ring *ring;
+	int i;
+
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	queue_id = (entry->ring_id & 0x70) >> 4;
+
+	switch (me_id) {
+	case 0:
+	case 1:
+	case 2:
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			ring = &adev->gfx.compute_ring[i];
+			if (ring->me == me_id && ring->pipe == pipe_id &&
+			    ring->queue == queue_id)
+				drm_sched_fault(&ring->sched);
+		}
+		break;
+	}
+}
+
+static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
+				   struct amdgpu_irq_src *source,
+				   struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal register access in command stream\n");
+	gfx_v9_4_3_fault(adev, entry);
+	return 0;
+}
+
+static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
+				    struct amdgpu_irq_src *source,
+				    struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal instruction in command stream\n");
+	gfx_v9_4_3_fault(adev, entry);
+	return 0;
+}
+
+static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
+{
+	const unsigned int cp_coher_cntl =
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
+	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
+	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
+	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
+static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
+					  uint32_t pipe, bool enable)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t val;
+	uint32_t wcl_cs_reg;
+
+	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+	val = enable ? 0x1 : 0x7f;
+
+	switch (pipe) {
+	case 0:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS0);
+		break;
+	case 1:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS1);
+		break;
+	case 2:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS2);
+		break;
+	case 3:
+		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS3);
+		break;
+	default:
+		DRM_DEBUG("invalid pipe %d\n", pipe);
+		return;
+	}
+
+	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+}
+
+static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t val;
+	int i;
+
+	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
+	 * limit the number of gfx waves. Setting 5 bits makes sure gfx only
+	 * gets around 25% of gpu resources.
+	 */
+	val = enable ? 0x1f : 0x07ffffff;
+	amdgpu_ring_emit_wreg(ring,
+			      SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_GFX),
+			      val);
+
+	/* Restrict waves for normal/low priority compute queues as well
+	 * to get best QoS for high priority compute jobs.
+	 *
+	 * amdgpu controls only 1st ME(0-3 CS pipes).
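+	 * The pipe this ring runs on keeps its full wave allocation; the
+	 * loop below skips i == ring->pipe and throttles only the other
+	 * CS pipes.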
+ */ + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + if (i != ring->pipe) + gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable); + + } +} + +static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { + .name = "gfx_v9_4_3", + .early_init = gfx_v9_4_3_early_init, + .late_init = gfx_v9_4_3_late_init, + .sw_init = gfx_v9_4_3_sw_init, + .sw_fini = gfx_v9_4_3_sw_fini, + .hw_init = gfx_v9_4_3_hw_init, + .hw_fini = gfx_v9_4_3_hw_fini, + .suspend = gfx_v9_4_3_suspend, + .resume = gfx_v9_4_3_resume, + .is_idle = gfx_v9_4_3_is_idle, + .wait_for_idle = gfx_v9_4_3_wait_for_idle, + .soft_reset = gfx_v9_4_3_soft_reset, + .set_clockgating_state = gfx_v9_4_3_set_clockgating_state, + .set_powergating_state = gfx_v9_4_3_set_powergating_state, + .get_clockgating_state = gfx_v9_4_3_get_clockgating_state, +}; + +static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { + .type = AMDGPU_RING_TYPE_COMPUTE, + .align_mask = 0xff, + .nop = PACKET3(PACKET3_NOP, 0x3FFF), + .support_64bit_ptrs = true, + .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, + .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, + .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, + .emit_frame_size = + 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ + 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ + 5 + /* hdp invalidate */ + 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + + 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ + 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */ + 7 + /* gfx_v9_4_3_emit_mem_sync */ + 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */ + 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */ + .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ + .emit_ib = gfx_v9_4_3_ring_emit_ib_compute, + .emit_fence = gfx_v9_4_3_ring_emit_fence, + .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync, + .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush, + .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch, + .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, + .test_ring = gfx_v9_4_3_ring_test_ring, + .test_ib = gfx_v9_4_3_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_wreg = gfx_v9_4_3_ring_emit_wreg, + .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, + .emit_mem_sync = gfx_v9_4_3_emit_mem_sync, + .emit_wave_limit = gfx_v9_4_3_emit_wave_limit, +}; + +static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { + .type = AMDGPU_RING_TYPE_KIQ, + .align_mask = 0xff, + .nop = PACKET3(PACKET3_NOP, 0x3FFF), + .support_64bit_ptrs = true, + .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, + .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, + .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, + .emit_frame_size = + 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ + 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ + 5 + /* hdp invalidate */ + 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + + 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ + 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */ + .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ + .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq, + .test_ring = gfx_v9_4_3_ring_test_ring, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_rreg = gfx_v9_4_3_ring_emit_rreg, + .emit_wreg = gfx_v9_4_3_ring_emit_wreg, + .emit_reg_wait = 
gfx_v9_4_3_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+};
+
+static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	adev->gfx.kiq[0].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++)
+		adev->gfx.compute_ring[i].funcs = &gfx_v9_4_3_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
+	.set = gfx_v9_4_3_set_eop_interrupt_state,
+	.process = gfx_v9_4_3_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
+	.set = gfx_v9_4_3_set_priv_reg_fault_state,
+	.process = gfx_v9_4_3_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
+	.set = gfx_v9_4_3_set_priv_inst_fault_state,
+	.process = gfx_v9_4_3_priv_inst_irq,
+};
+
+static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
+
+	adev->gfx.priv_reg_irq.num_types = 1;
+	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
+
+	adev->gfx.priv_inst_irq.num_types = 1;
+	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
+}
+
+static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
+{
+	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
+}
+
+
+static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
+{
+	/* init ASIC gds info */
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(9, 4, 3):
+		/* 9.4.3 removed all the GDS internal memory;
+		 * only GWS opcodes used by the kernel, such as
+		 * barrier and semaphore, are supported.
+		 */
+		adev->gds.gds_size = 0;
+		break;
+	default:
+		adev->gds.gds_size = 0x10000;
+		break;
+	}
+
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(9, 4, 3):
+		/* deprecated for 9.4.3, no usage at all */
+		adev->gds.gds_compute_max_wave_id = 0;
+		break;
+	default:
+		/* this really depends on the chip */
+		adev->gds.gds_compute_max_wave_id = 0x7ff;
+		break;
+	}
+
+	adev->gds.gws_size = 64;
+	adev->gds.oa_size = 16;
+}
+
+static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+						   u32 bitmap)
+{
+	u32 data;
+
+	if (!bitmap)
+		return;
+
+	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
+static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
+{
+	u32 data, mask;
+
+	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
+	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);
+
+	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+
+	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
+
+	return (~data) & mask;
+}
+
+static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+				  struct amdgpu_cu_info *cu_info)
+{
+	int i, j, k, counter, active_cu_number = 0;
+	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+	unsigned disable_masks[4 * 4];
+
+	if (!adev || !cu_info)
+		return -EINVAL;
+
+	/*
+	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
+	 */
+	if (adev->gfx.config.max_shader_engines *
+		adev->gfx.config.max_sh_per_se > 16)
+		return -EINVAL;
+
+	amdgpu_gfx_parse_disable_cu(disable_masks,
+				    adev->gfx.config.max_shader_engines,
+				    adev->gfx.config.max_sh_per_se);
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			mask = 1;
+			ao_bitmap = 0;
+			counter = 0;
+			gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff);
+			gfx_v9_4_3_set_user_cu_inactive_bitmap(
+				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
+			bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
+
+			/*
+			 * The bitmap (and ao_cu_bitmap) in the cu_info
+			 * structure is a 4x4 array, which usually suits Vega
+			 * ASICs with their 4*2 SE/SH layout.
+			 * But for Arcturus, the SE/SH layout changed to 8*1.
+			 * To minimize the impact, we map it onto the current
+			 * bitmap array as below:
+			 *    SE4,SH0 --> bitmap[0][1]
+			 *    SE5,SH0 --> bitmap[1][1]
+			 *    SE6,SH0 --> bitmap[2][1]
+			 *    SE7,SH0 --> bitmap[3][1]
+			 */
+			cu_info->bitmap[i % 4][j + i / 4] = bitmap;
+
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+				if (bitmap & mask) {
+					if (counter < adev->gfx.config.max_cu_per_sh)
+						ao_bitmap |= mask;
+					counter++;
+				}
+				mask <<= 1;
+			}
+			active_cu_number += counter;
+			if (i < 2 && j < 2)
+				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
+		}
+	}
+	gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	cu_info->number = active_cu_number;
+	cu_info->ao_cu_mask = ao_cu_mask;
+	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
+
+	return 0;
+}
+
+const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 9,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &gfx_v9_4_3_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
index 84e69701b81a..4b530f4c1295 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
@@ -24,7 +24,6 @@
 #ifndef __GFX_V9_4_3_H__
 #define __GFX_V9_4_3_H__
 
-extern const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs;
-extern const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs;
+extern const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block;
 
 #endif /* __GFX_V9_4_3_H__ */

From 5aa998baab3360d0f1b93d6aff3df924045f956c Mon Sep 17 00:00:00 2001
From: Le Ma
Date: Wed, 17 Nov 2021 16:28:51 +0800
Subject: [PATCH 052/861] drm/amdgpu: add xcc index argument to soc15_grbm_select

To support GRBM select for the multiple-XCD case.
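A minimal usage sketch (illustrative only; every call site converted by
this patch still passes 0 as the new xcc_id argument):

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me, pipe, queue, vmid, xcc_id);
	/* program per-queue registers on the selected XCC here */
	soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id);
	mutex_unlock(&adev->srbm_mutex);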
v2: unify naming style Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 10 +++---- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 26 +++++++++---------- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 4 +-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 26 +++++++++---------- drivers/gpu/drm/amd/amdgpu/soc15.c | 4 +-- drivers/gpu/drm/amd/amdgpu/soc15.h | 2 +- 6 files changed, 36 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 03875b971ba6..ebb35633058c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -50,12 +50,12 @@ static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, mec, pipe, queue, vmid); + soc15_grbm_select(adev, mec, pipe, queue, vmid, 0); } static void unlock_srbm(struct amdgpu_device *adev) { - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } @@ -700,7 +700,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, *wave_cnt = 0; pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe; queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe; - soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0); + soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, 0); reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot); *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; @@ -772,7 +772,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES); lock_spi_csq_mutexes(adev); - soc15_grbm_select(adev, 1, 0, 0, 0); + soc15_grbm_select(adev, 1, 0, 0, 0, 0); /* * Iterate through the shader engines and arrays of the device @@ -821,7 +821,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, } amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); unlock_spi_csq_mutexes(adev); /* Update the output parameters and return */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 62af92e5be51..4939fd61355b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1831,7 +1831,7 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev, u32 me, u32 pipe, u32 q, u32 vm) { - soc15_grbm_select(adev, me, pipe, q, vm); + soc15_grbm_select(adev, me, pipe, q, vm, 0); } static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { @@ -2324,12 +2324,12 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) mutex_lock(&adev->srbm_mutex); for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { - soc15_grbm_select(adev, 0, 0, 0, i); + soc15_grbm_select(adev, 0, 0, 0, i, 0); /* CP and shaders */ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases); } - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); /* Initialize all compute VMIDs to have no GDS, GWS, or OA @@ -2394,7 +2394,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) /* 
where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { - soc15_grbm_select(adev, 0, 0, 0, i); + soc15_grbm_select(adev, 0, 0, 0, i, 0); /* CP and shaders */ if (i == 0) { tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, @@ -2416,7 +2416,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp); } } - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); @@ -3540,9 +3540,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) amdgpu_ring_clear_ring(ring); mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); gfx_v9_0_kiq_init_register(ring); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } else { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); @@ -3551,10 +3551,10 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) if (amdgpu_sriov_vf(adev) && adev->in_suspend) amdgpu_ring_clear_ring(ring); mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); gfx_v9_0_mqd_init(ring); gfx_v9_0_kiq_init_register(ring); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.kiq[0].mqd_backup) @@ -3582,9 +3582,9 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); gfx_v9_0_mqd_init(ring); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) @@ -3791,9 +3791,9 @@ static int gfx_v9_0_hw_fini(void *handle) mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me, adev->gfx.kiq[0].ring.pipe, - adev->gfx.kiq[0].ring.queue, 0); + adev->gfx.kiq[0].ring.queue, 0, 0); gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 3a797424579c..93438770ca1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -761,7 +761,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, for (i = first_vmid; i < last_vmid; i++) { data = 0; - soc15_grbm_select(adev, 0, 0, 0, i); + soc15_grbm_select(adev, 0, 0, 0, i, 0); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, @@ -769,7 +769,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data); } - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 212d48114da8..12185e7aac4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -658,7 +658,7 @@ static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, u32 me, u32 pipe, u32 q, u32 vm) { - soc15_grbm_select(adev, me, pipe, q, vm); + soc15_grbm_select(adev, me, pipe, q, vm, 0); } static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { @@ -926,12 +926,12 @@ static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev) mutex_lock(&adev->srbm_mutex); for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { - soc15_grbm_select(adev, 0, 0, 0, i); + soc15_grbm_select(adev, 0, 0, 0, i, 0); /* CP and shaders */ WREG32_SOC15_RLC(GC, 0, regSH_MEM_CONFIG, sh_mem_config); WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, sh_mem_bases); } - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); /* Initialize all compute VMIDs to have no GDS, GWS, or OA @@ -977,7 +977,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { - soc15_grbm_select(adev, 0, 0, 0, i); + soc15_grbm_select(adev, 0, 0, 0, i, 0); /* CP and shaders */ if (i == 0) { tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, @@ -999,7 +999,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, tmp); } } - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); @@ -1706,19 +1706,19 @@ static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring) amdgpu_ring_clear_ring(ring); mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); gfx_v9_4_3_kiq_init_register(ring); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } else { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); gfx_v9_4_3_mqd_init(ring); gfx_v9_4_3_kiq_init_register(ring); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.kiq[0].mqd_backup) @@ -1746,9 +1746,9 @@ static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring) ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); gfx_v9_4_3_mqd_init(ring); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) @@ -1896,9 +1896,9 @@ static int gfx_v9_4_3_hw_fini(void *handle) mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me, adev->gfx.kiq[0].ring.pipe, - adev->gfx.kiq[0].ring.queue, 0); + 
adev->gfx.kiq[0].ring.queue, 0, 0); gfx_v9_4_3_kiq_fini_register(&adev->gfx.kiq[0].ring); - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index bc5dd80f10c1..4b79a8933476 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -311,7 +311,7 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) void soc15_grbm_select(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 queue, u32 vmid) + u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id) { u32 grbm_gfx_cntl = 0; grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe); @@ -319,7 +319,7 @@ void soc15_grbm_select(struct amdgpu_device *adev, grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid); grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue); - WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl); + WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl); } static void soc15_vga_set_state(struct amdgpu_device *adev, bool state) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index efc2a253e8db..2b41ee968dd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -100,7 +100,7 @@ struct soc15_ras_field_entry { #define SOC15_RAS_REG_FIELD_VAL(val, entry, field) SOC15_REG_FIELD_VAL((val), (entry).field##_count_mask, (entry).field##_count_shift) void soc15_grbm_select(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 queue, u32 vmid); + u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id) ; void soc15_set_virt_ops(struct amdgpu_device *adev); void soc15_program_register_sequence(struct amdgpu_device *adev, From 6f917fdc934518401ff2e166e6db1f6ac1ef1078 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 17 Nov 2021 17:24:02 +0800 Subject: [PATCH 053/861] drm/amdgpu: add multi-XCC initial support in gfx_v9_4_3.c Each XCD needs to be initialized separately. The major changes are: 1. add iteration to do rlc/kiq/kcq init/fini for each xcd 2. load rlc/mec microcode to each xcd 3.
add argument to specify xcc index in initialization functions Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 727 +++++++++++++----------- 1 file changed, 397 insertions(+), 330 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 12185e7aac4e..56999bb7ac26 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -181,7 +181,9 @@ static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = { static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - adev->gfx.kiq[0].pmf = &gfx_v9_4_3_kiq_pm4_funcs; + int i; + for (i = 0; i < adev->gfx.num_xcd; i++) + adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs; } static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) @@ -504,7 +506,9 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr; - bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + for (i = 0; i < adev->gfx.num_xcd; i++) + bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap, + AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -731,7 +735,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) } static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, - int mec, int pipe, int queue) + int xcc_id, int mec, int pipe, int queue) { unsigned irq_type; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; @@ -740,6 +744,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring = &adev->gfx.compute_ring[ring_id]; /* mec0 is me1 */ + ring->xcc_id = xcc_id; ring->me = mec + 1; ring->pipe = pipe; ring->queue = queue; @@ -750,7 +755,8 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); ring->vm_hub = AMDGPU_GFXHUB_0; - sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); + sprintf(ring->name, "comp_%d.%d.%d.%d", + ring->xcc_id, ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) @@ -764,7 +770,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v9_4_3_sw_init(void *handle) { - int i, j, k, r, ring_id; + int i, j, k, r, ring_id, xcc_id; struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -805,40 +811,46 @@ static int gfx_v9_4_3_sw_init(void *handle) /* set up the compute queues - allocate horizontally across pipes */ ring_id = 0; - for (i = 0; i < adev->gfx.mec.num_mec; ++i) { - for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { - for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, - k, j)) - continue; + for (xcc_id = 0; xcc_id < adev->gfx.num_xcd; xcc_id++) { - r = gfx_v9_4_3_compute_ring_init(adev, - ring_id, - i, k, j); - if (r) - return r; + for (i = 0; i < adev->gfx.mec.num_mec; ++i) { + for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { + for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; + k++) { + if (!amdgpu_gfx_is_mec_queue_enabled( + adev, xcc_id, i, k, j)) + continue; - ring_id++; + r = gfx_v9_4_3_compute_ring_init(adev, + ring_id, + xcc_id, + i, k, j); + if (r) + return r; + + ring_id++; + } } } 
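+ /* Per-XCC bring-up: from here on, everything that used to be created once (the KIQ BO, the KIQ ring and the compute MQDs) is created once per XCD, with xcc_id indexing adev->gfx.kiq[] and the per-XCC MQD storage. */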
+ + r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id); + if (r) { + DRM_ERROR("Failed to init KIQ BOs!\n"); + return r; + } + + kiq = &adev->gfx.kiq[xcc_id]; + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, xcc_id); + if (r) + return r; + + /* create MQD for all compute queues as well as KIQ for SRIOV case */ + r = amdgpu_gfx_mqd_sw_init(adev, + sizeof(struct v9_mqd_allocation), xcc_id); + if (r) + return r; + } - r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); - if (r) { - DRM_ERROR("Failed to init KIQ BOs!\n"); - return r; - } - - kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); - if (r) - return r; - - /* create MQD for all compute queues as wel as KIQ for SRIOV case */ - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0); - if (r) - return r; - r = gfx_v9_4_3_gpu_early_init(adev); if (r) return r; @@ -851,12 +863,15 @@ static int gfx_v9_4_3_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - for (i = 0; i < adev->gfx.num_compute_rings; i++) + for (i = 0; i < adev->gfx.num_compute_rings * + adev->gfx.num_xcd; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev, 0); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev, 0); + for (i = 0; i < adev->gfx.num_xcd; i++) { + amdgpu_gfx_mqd_sw_fini(adev, i); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring); + amdgpu_gfx_kiq_fini(adev, i); + } gfx_v9_4_3_mec_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); @@ -881,7 +896,7 @@ static u32 gfx_v9_4_3_get_rb_active_bitmap(struct amdgpu_device *adev) return (~data) & mask; } -static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev) +static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev, int xcc_id) { int i, j; u32 data; @@ -906,7 +921,7 @@ static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev) } #define DEFAULT_SH_MEM_BASES (0x6000) -static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev) +static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev, int xcc_id) { int i; uint32_t sh_mem_config; @@ -926,25 +941,25 @@ static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev) mutex_lock(&adev->srbm_mutex); for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { - soc15_grbm_select(adev, 0, 0, 0, i, 0); + soc15_grbm_select(adev, 0, 0, 0, i, xcc_id); /* CP and shaders */ - WREG32_SOC15_RLC(GC, 0, regSH_MEM_CONFIG, sh_mem_config); - WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, sh_mem_bases); + WREG32_SOC15_RLC(GC, xcc_id, regSH_MEM_CONFIG, sh_mem_config); + WREG32_SOC15_RLC(GC, xcc_id, regSH_MEM_BASES, sh_mem_bases); } - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id); mutex_unlock(&adev->srbm_mutex); /* Initialize all compute VMIDs to have no GDS, GWS, or OA acccess. These should be enabled by FW for target VMIDs.
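(GDS = global data share, GWS = global wave sync, OA = ordered append.)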
*/ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { - WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); - WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); - WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0); - WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_OA_VMID0, i, 0); } } -static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev) +static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) { int vmid; @@ -955,21 +970,23 @@ static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev) * access so that HWS firmware can save/restore entries. */ for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { - WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0); - WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0); - WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0); - WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0); + WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_BASE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_SIZE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_GWS_VMID0, vmid, 0); + WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_OA_VMID0, vmid, 0); } } static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) { u32 tmp; - int i; + int i, j; - WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); + for (i = 0; i < adev->gfx.num_xcd; i++) { + WREG32_FIELD15_PREREG(GC, i, GRBM_CNTL, READ_TIMEOUT, 0xff); + gfx_v9_4_3_setup_rb(adev, i); + } - gfx_v9_4_3_setup_rb(adev); gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info); adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, regDB_DEBUG2); @@ -977,63 +994,68 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { - soc15_grbm_select(adev, 0, 0, 0, i, 0); - /* CP and shaders */ - if (i == 0) { - tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, - SH_MEM_ALIGNMENT_MODE_UNALIGNED); - tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, - !!adev->gmc.noretry); - WREG32_SOC15_RLC(GC, 0, regSH_MEM_CONFIG, tmp); - WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, 0); - } else { - tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, - SH_MEM_ALIGNMENT_MODE_UNALIGNED); - tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, - !!adev->gmc.noretry); - WREG32_SOC15_RLC(GC, 0, regSH_MEM_CONFIG, tmp); - tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, - (adev->gmc.private_aperture_start >> 48)); - tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, - (adev->gmc.shared_aperture_start >> 48)); - WREG32_SOC15_RLC(GC, 0, regSH_MEM_BASES, tmp); + for (j = 0; j < adev->gfx.num_xcd; j++) { + soc15_grbm_select(adev, 0, 0, 0, i, j); + /* CP and shaders */ + if (i == 0) { + tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, + SH_MEM_ALIGNMENT_MODE_UNALIGNED); + tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, + !!adev->gmc.noretry); + WREG32_SOC15_RLC(GC, j, regSH_MEM_CONFIG, tmp); + WREG32_SOC15_RLC(GC, j, regSH_MEM_BASES, 0); + } else { + tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, + SH_MEM_ALIGNMENT_MODE_UNALIGNED); + tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, + !!adev->gmc.noretry); + WREG32_SOC15_RLC(GC, j, regSH_MEM_CONFIG, tmp); + tmp 
= REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, + (adev->gmc.private_aperture_start >> 48)); + tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, + (adev->gmc.shared_aperture_start >> 48)); + WREG32_SOC15_RLC(GC, j, regSH_MEM_BASES, tmp); + } } } soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - gfx_v9_4_3_init_compute_vmid(adev); - gfx_v9_4_3_init_gds_vmid(adev); + for (i = 0; i < adev->gfx.num_xcd; i++) { + gfx_v9_4_3_init_compute_vmid(adev, i); + gfx_v9_4_3_init_gds_vmid(adev, i); + } } -static void gfx_v9_4_3_enable_save_restore_machine(struct amdgpu_device *adev) +static void gfx_v9_4_3_enable_save_restore_machine(struct amdgpu_device *adev, + int xcc_id) { - WREG32_FIELD15_PREREG(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1); + WREG32_FIELD15_PREREG(GC, xcc_id, RLC_SRM_CNTL, SRM_ENABLE, 1); } -static void gfx_v9_4_3_init_csb(struct amdgpu_device *adev) +static void gfx_v9_4_3_init_csb(struct amdgpu_device *adev, int xcc_id) { adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regRLC_CSIB_ADDR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_ADDR_HI), adev->gfx.rlc.clear_state_gpu_addr >> 32); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regRLC_CSIB_ADDR_LO), + WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_ADDR_LO), adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regRLC_CSIB_LENGTH), + WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_LENGTH), adev->gfx.rlc.clear_state_size); } -static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev) +static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev, int xcc_id) { - gfx_v9_4_3_init_csb(adev); + gfx_v9_4_3_init_csb(adev, xcc_id); /* * Rlc save restore list is workable since v2_1. * And it's needed by gfxoff feature. 
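* Each XCC carries its own RLC, so the CSB registers above and the save/restore machine below take the xcc_id and are programmed once per XCD from the rlc_resume loop.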
*/ if (adev->gfx.rlc.is_rlc_v2_1) - gfx_v9_4_3_enable_save_restore_machine(adev); + gfx_v9_4_3_enable_save_restore_machine(adev, xcc_id); if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | @@ -1115,7 +1137,8 @@ static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) return 0; } -static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev) +static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, + int xcc_id) { u32 i, j, k; u32 mask; @@ -1171,17 +1194,25 @@ static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) { - WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0); - gfx_v9_4_3_enable_gui_idle_interrupt(adev, false); - gfx_v9_4_3_wait_for_rlc_serdes(adev); + int i; + + for (i = 0; i < adev->gfx.num_xcd; i++) { + WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 0); + gfx_v9_4_3_enable_gui_idle_interrupt(adev, false); + gfx_v9_4_3_wait_for_rlc_serdes(adev, i); + } } static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) { - WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - udelay(50); - WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); - udelay(50); + int i; + + for (i = 0; i < adev->gfx.num_xcd; i++) { + WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + udelay(50); + WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); + udelay(50); + } } static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) @@ -1189,35 +1220,38 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) #ifdef AMDGPU_RLC_DEBUG_RETRY u32 rlc_ucode_ver; #endif + int i; - WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); - udelay(50); - - /* carrizo do enable cp interrupt after cp inited */ - if (!(adev->flags & AMD_IS_APU)) { - gfx_v9_4_3_enable_gui_idle_interrupt(adev, true); + for (i = 0; i < adev->gfx.num_xcd; i++) { + WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 1); udelay(50); - } + + /* carrizo do enable cp interrupt after cp inited */ + if (!(adev->flags & AMD_IS_APU)) { + gfx_v9_4_3_enable_gui_idle_interrupt(adev, true); + udelay(50); + } #ifdef AMDGPU_RLC_DEBUG_RETRY - /* RLC_GPM_GENERAL_6 : RLC Ucode version */ - rlc_ucode_ver = RREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_6); - if (rlc_ucode_ver == 0x108) { - dev_info(adev->dev, - "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", - rlc_ucode_ver, adev->gfx.rlc_fw_version); - /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, - * default is 0x9C4 to create a 100us interval */ - WREG32_SOC15(GC, 0, regRLC_GPM_TIMER_INT_3, 0x9C4); - /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr - * to disable the page fault retry interrupts, default is - * 0x100 (256) */ - WREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_12, 0x100); - } + /* RLC_GPM_GENERAL_6 : RLC Ucode version */ + rlc_ucode_ver = RREG32_SOC15(GC, i, regRLC_GPM_GENERAL_6); + if (rlc_ucode_ver == 0x108) { + dev_info(adev->dev, + "Using rlc debug ucode. 
regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", + rlc_ucode_ver, adev->gfx.rlc_fw_version); + /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, + * default is 0x9C4 to create a 100us interval */ + WREG32_SOC15(GC, i, regRLC_GPM_TIMER_INT_3, 0x9C4); + /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr + * to disable the page fault retry interrupts, default is + * 0x100 (256) */ + WREG32_SOC15(GC, i, regRLC_GPM_GENERAL_12, 0x100); + } #endif + } } -static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev) +static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev, int xcc_id) { const struct rlc_firmware_header_v2_0 *hdr; const __le32 *fw_data; @@ -1233,36 +1267,38 @@ static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev) le32_to_cpu(hdr->header.ucode_array_offset_bytes)); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, + WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_ADDR, RLCG_UCODE_LOADING_START_ADDRESS); for (i = 0; i < fw_size; i++) { if (amdgpu_emu_mode == 1 && i % 100 == 0) { dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i); msleep(1); } - WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); } - WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); + WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); return 0; } static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) { - int r; + int r, i; adev->gfx.rlc.funcs->stop(adev); - /* disable CG */ - WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); + for (i = 0; i < adev->gfx.num_xcd; i++) { + /* disable CG */ + WREG32_SOC15(GC, i, regRLC_CGCG_CGLS_CTRL, 0); - gfx_v9_4_3_init_pg(adev); + gfx_v9_4_3_init_pg(adev, i); - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - /* legacy rlc firmware loading */ - r = gfx_v9_4_3_rlc_load_microcode(adev); - if (r) - return r; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + /* legacy rlc firmware loading */ + r = gfx_v9_4_3_rlc_load_microcode(adev, i); + if (r) + return r; + } } adev->gfx.rlc.funcs->start(adev); @@ -1270,7 +1306,8 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) return 0; } -static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, + unsigned vmid) { u32 reg, data; @@ -1323,19 +1360,21 @@ static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offs ARRAY_SIZE(rlcg_access_gc_9_4_3)); } -static void gfx_v9_4_3_cp_compute_enable(struct amdgpu_device *adev, bool enable) +static void gfx_v9_4_3_cp_compute_enable(struct amdgpu_device *adev, + bool enable, int xcc_id) { if (enable) { - WREG32_SOC15_RLC(GC, 0, regCP_MEC_CNTL, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_MEC_CNTL, 0); } else { - WREG32_SOC15_RLC(GC, 0, regCP_MEC_CNTL, + WREG32_SOC15_RLC(GC, xcc_id, regCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - adev->gfx.kiq[0].ring.sched.ready = false; + adev->gfx.kiq[xcc_id].ring.sched.ready = false; } udelay(50); } -static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev) +static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev, + int xcc_id) { const struct gfx_firmware_header_v1_0 *mec_hdr; const __le32 *fw_data; @@ -1347,7 +1386,7 @@ static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev) if (!adev->gfx.mec_fw) return 
-EINVAL; - gfx_v9_4_3_cp_compute_enable(adev, false); + gfx_v9_4_3_cp_compute_enable(adev, false, xcc_id); mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); @@ -1358,17 +1397,17 @@ static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev) tmp = 0; tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); - WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); + WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_CNTL, tmp); - WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, + WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); - WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, + WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_HI, upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); mec_ucode_addr_offset = - SOC15_REG_OFFSET(GC, 0, regCP_MEC_ME1_UCODE_ADDR); + SOC15_REG_OFFSET(GC, xcc_id, regCP_MEC_ME1_UCODE_ADDR); mec_ucode_data_offset = - SOC15_REG_OFFSET(GC, 0, regCP_MEC_ME1_UCODE_DATA); + SOC15_REG_OFFSET(GC, xcc_id, regCP_MEC_ME1_UCODE_DATA); /* MEC1 */ WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset); @@ -1383,18 +1422,18 @@ static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev) } /* KIQ functions */ -static void gfx_v9_4_3_kiq_setting(struct amdgpu_ring *ring) +static void gfx_v9_4_3_kiq_setting(struct amdgpu_ring *ring, int xcc_id) { uint32_t tmp; struct amdgpu_device *adev = ring->adev; /* tell RLC which is KIQ queue */ - tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); + tmp = RREG32_SOC15(GC, xcc_id, regRLC_CP_SCHEDULERS); tmp &= 0xffffff00; tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); - WREG32_SOC15_RLC(GC, 0, regRLC_CP_SCHEDULERS, tmp); + WREG32_SOC15_RLC(GC, xcc_id, regRLC_CP_SCHEDULERS, tmp); tmp |= 0x80; - WREG32_SOC15_RLC(GC, 0, regRLC_CP_SCHEDULERS, tmp); + WREG32_SOC15_RLC(GC, xcc_id, regRLC_CP_SCHEDULERS, tmp); } static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) @@ -1538,123 +1577,123 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) return 0; } -static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring) +static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring, int xcc_id) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; int j; /* disable wptr polling */ - WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); + WREG32_FIELD15_PREREG(GC, xcc_id, CP_PQ_WPTR_POLL_CNTL, EN, 0); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_EOP_BASE_ADDR, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi); /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_EOP_CONTROL, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_CONTROL, mqd->cp_hqd_eop_control); /* enable doorbell? 
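(the control word is taken from the MQD image here; the doorbell range and the CP_PQ_STATUS enable bit are only programmed further down, once the rest of the queue state is in place)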
*/ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); /* disable the queue if it's active */ - if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { - WREG32_SOC15_RLC(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); + if (RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1) { + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, 1); for (j = 0; j < adev->usec_timeout; j++) { - if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + if (!(RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1)) break; udelay(1); } - WREG32_SOC15_RLC(GC, 0, regCP_HQD_DEQUEUE_REQUEST, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_RPTR, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_LO, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_HI, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi); } /* set the pointer to the MQD */ - WREG32_SOC15_RLC(GC, 0, regCP_MQD_BASE_ADDR, + WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo); - WREG32_SOC15_RLC(GC, 0, regCP_MQD_BASE_ADDR_HI, + WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); /* set MQD vmid to 0 */ - WREG32_SOC15_RLC(GC, 0, regCP_MQD_CONTROL, + WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_CONTROL, mqd->cp_mqd_control); /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_BASE, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_BASE_HI, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi); /* set up the HQD, this is similar to CP_RB0_CNTL */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_CONTROL, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control); /* set the wb address whether it's enabled or not */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR_REPORT_ADDR, mqd->cp_hqd_pq_rptr_report_addr_lo); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, mqd->cp_hqd_pq_rptr_report_addr_hi); /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, mqd->cp_hqd_pq_wptr_poll_addr_hi); /* enable the doorbell if requested */ if (ring->use_doorbell) { - WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, + WREG32_SOC15(GC, xcc_id, regCP_MEC_DOORBELL_RANGE_LOWER, (adev->doorbell_index.kiq * 2) << 2); - WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, + WREG32_SOC15(GC, xcc_id, regCP_MEC_DOORBELL_RANGE_UPPER, (adev->doorbell_index.userqueue_end * 2) << 2); } - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_LO, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_HI, + 
WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi); /* set the vmid for the queue */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_VMID, mqd->cp_hqd_vmid); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PERSISTENT_STATE, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state); /* activate the queue */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_ACTIVE, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_ACTIVE, mqd->cp_hqd_active); if (ring->use_doorbell) - WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); + WREG32_FIELD15_PREREG(GC, xcc_id, CP_PQ_STATUS, DOORBELL_ENABLE, 1); return 0; } -static int gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring) +static int gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring, int xcc_id) { struct amdgpu_device *adev = ring->adev; int j; /* disable the queue if it's active */ - if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { + if (RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1) { - WREG32_SOC15_RLC(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, 1); for (j = 0; j < adev->usec_timeout; j++) { - if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + if (!(RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1)) break; udelay(1); } @@ -1663,72 +1702,71 @@ static int gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring) DRM_DEBUG("KIQ dequeue request failed.\n"); /* Manual disable if dequeue request times out */ - WREG32_SOC15_RLC(GC, 0, regCP_HQD_ACTIVE, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_ACTIVE, 0); } - WREG32_SOC15_RLC(GC, 0, regCP_HQD_DEQUEUE_REQUEST, + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, 0); } - WREG32_SOC15_RLC(GC, 0, regCP_HQD_IQ_TIMER, 0); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_IB_CONTROL, 0); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PERSISTENT_STATE, 0); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_RPTR, 0); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_HI, 0); - WREG32_SOC15_RLC(GC, 0, regCP_HQD_PQ_WPTR_LO, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_IQ_TIMER, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_IB_CONTROL, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PERSISTENT_STATE, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI, 0); + WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO, 0); return 0; } -static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring) +static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; struct v9_mqd *tmp_mqd; - gfx_v9_4_3_kiq_setting(ring); + gfx_v9_4_3_kiq_setting(ring, xcc_id); /* GPU could be in bad state during probe, driver trigger the reset * after load the SMU, in this case , the mqd is not be initialized. * driver need to re-init the mqd. 
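* (The test below is made on the KIQ mqd_backup copy, which mirrors the live MQD after every successful init.)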
* check mqd->cp_hqd_pq_control since this value should not be 0 */ - tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup; + tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup; if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) { /* for GPU_RESET case , reset MQD to a clean status */ - if (adev->gfx.kiq[0].mqd_backup) - memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation)); + if (adev->gfx.kiq[xcc_id].mqd_backup) + memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; amdgpu_ring_clear_ring(ring); - mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); - gfx_v9_4_3_kiq_init_register(ring); - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id); + gfx_v9_4_3_kiq_init_register(ring, xcc_id); + soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id); mutex_unlock(&adev->srbm_mutex); } else { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id); gfx_v9_4_3_mqd_init(ring); - gfx_v9_4_3_kiq_init_register(ring); - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + gfx_v9_4_3_kiq_init_register(ring, xcc_id); + soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.kiq[0].mqd_backup) - memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); + if (adev->gfx.kiq[xcc_id].mqd_backup) + memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); } return 0; } -static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring) +static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -1746,9 +1784,9 @@ static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring) ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id); gfx_v9_4_3_mqd_init(ring); - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) @@ -1769,12 +1807,12 @@ static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring) return 0; } -static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev) +static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring; int r; - ring = &adev->gfx.kiq[0].ring; + ring = &adev->gfx.kiq[xcc_id].ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) @@ -1784,7 +1822,7 @@ static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev) if (unlikely(r != 0)) return r; - gfx_v9_4_3_kiq_init_queue(ring); + gfx_v9_4_3_kiq_init_queue(ring, xcc_id); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); @@ -1792,22 +1830,22 @@ static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev) return 0; } -static int gfx_v9_4_3_kcq_resume(struct amdgpu_device *adev) +static int gfx_v9_4_3_kcq_resume(struct amdgpu_device *adev, int 
xcc_id) { struct amdgpu_ring *ring = NULL; int r = 0, i; - gfx_v9_4_3_cp_compute_enable(adev, true); + gfx_v9_4_3_cp_compute_enable(adev, true, xcc_id); for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; + ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) goto done; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v9_4_3_kcq_init_queue(ring); + r = gfx_v9_4_3_kcq_init_queue(ring, xcc_id); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -1816,47 +1854,50 @@ static int gfx_v9_4_3_kcq_resume(struct amdgpu_device *adev) goto done; } - r = amdgpu_gfx_enable_kcq(adev, 0); + r = amdgpu_gfx_enable_kcq(adev, xcc_id); done: return r; } static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { - int r, i; + int r, i, j; struct amdgpu_ring *ring; - gfx_v9_4_3_enable_gui_idle_interrupt(adev, false); + for (i = 0; i < adev->gfx.num_xcd; i++) { + gfx_v9_4_3_enable_gui_idle_interrupt(adev, false); - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - gfx_v9_4_3_disable_gpa_mode(adev); + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + gfx_v9_4_3_disable_gpa_mode(adev); - r = gfx_v9_4_3_cp_compute_load_microcode(adev); + r = gfx_v9_4_3_cp_compute_load_microcode(adev, i); + if (r) + return r; + } + + r = gfx_v9_4_3_kiq_resume(adev, i); if (r) return r; + + r = gfx_v9_4_3_kcq_resume(adev, i); + if (r) + return r; + + for (j = 0; j < adev->gfx.num_compute_rings; j++) { + ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings]; + amdgpu_ring_test_helper(ring); + } + + gfx_v9_4_3_enable_gui_idle_interrupt(adev, true); } - r = gfx_v9_4_3_kiq_resume(adev); - if (r) - return r; - - r = gfx_v9_4_3_kcq_resume(adev); - if (r) - return r; - - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; - amdgpu_ring_test_helper(ring); - } - - gfx_v9_4_3_enable_gui_idle_interrupt(adev, true); - return 0; } -static void gfx_v9_4_3_cp_enable(struct amdgpu_device *adev, bool enable) +static void gfx_v9_4_3_cp_enable(struct amdgpu_device *adev, bool enable, + int xcc_id) { - gfx_v9_4_3_cp_compute_enable(adev, enable); + gfx_v9_4_3_cp_compute_enable(adev, enable, xcc_id); } static int gfx_v9_4_3_hw_init(void *handle) @@ -1882,28 +1923,31 @@ static int gfx_v9_4_3_hw_init(void *handle) static int gfx_v9_4_3_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - if (amdgpu_gfx_disable_kcq(adev, 0)) - DRM_ERROR("KCQ disable failed\n"); + for (i = 0; i < adev->gfx.num_xcd; i++) { + if (amdgpu_gfx_disable_kcq(adev, i)) + DRM_ERROR("XCD %d KCQ disable failed\n", i); - /* Use deinitialize sequence from CAIL when unbinding device from driver, - * otherwise KIQ is hanging when binding back - */ - if (!amdgpu_in_reset(adev) && !adev->in_suspend) { - mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me, - adev->gfx.kiq[0].ring.pipe, - adev->gfx.kiq[0].ring.queue, 0, 0); - gfx_v9_4_3_kiq_fini_register(&adev->gfx.kiq[0].ring); - soc15_grbm_select(adev, 0, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); + /* Use deinitialize sequence from CAIL when unbinding device + * from driver, otherwise KIQ is hanging when binding back + */ + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, 
adev->gfx.kiq[i].ring.me, + adev->gfx.kiq[i].ring.pipe, + adev->gfx.kiq[i].ring.queue, 0, i); + gfx_v9_4_3_kiq_fini_register(&adev->gfx.kiq[i].ring, i); + soc15_grbm_select(adev, 0, 0, 0, 0, i); + mutex_unlock(&adev->srbm_mutex); + } + + gfx_v9_4_3_cp_enable(adev, false, i); } - gfx_v9_4_3_cp_enable(adev, false); - /* Skip suspend with A+A reset */ if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n"); @@ -1927,12 +1971,14 @@ static int gfx_v9_4_3_resume(void *handle) static bool gfx_v9_4_3_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; - if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), - GRBM_STATUS, GUI_ACTIVE)) - return false; - else - return true; + for (i = 0; i < adev->gfx.num_xcd; i++) { + if (REG_GET_FIELD(RREG32_SOC15(GC, i, regGRBM_STATUS), + GRBM_STATUS, GUI_ACTIVE)) + return false; + } + return true; } static int gfx_v9_4_3_wait_for_idle(void *handle) @@ -1985,7 +2031,7 @@ static int gfx_v9_4_3_soft_reset(void *handle) adev->gfx.rlc.funcs->stop(adev); /* Disable MEC parsing/prefetching */ - gfx_v9_4_3_cp_compute_enable(adev, false); + gfx_v9_4_3_cp_compute_enable(adev, false, 0); if (grbm_soft_reset) { tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); @@ -2040,6 +2086,11 @@ static int gfx_v9_4_3_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* hardcode in emulation phase */ + adev->gfx.num_xcd = 1; + adev->gfx.num_xcc_per_xcp = 1; + adev->gfx.partition_mode = AMDGPU_SPX_PARTITION_MODE; + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_4_3_set_kiq_pm4_funcs(adev); @@ -2068,7 +2119,7 @@ static int gfx_v9_4_3_late_init(void *handle) } static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, - bool enable) + bool enable, int xcc_id) { uint32_t data, def; @@ -2077,7 +2128,7 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { /* 1 - RLC_CGTT_MGCG_OVERRIDE */ - def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE); data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | @@ -2087,28 +2138,28 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; if (def != data) - WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); + WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data); /* MGLS is a global flag to control all MGLS in GFX */ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { /* 2 - RLC memory Light sleep */ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { - def = data = RREG32_SOC15(GC, 0, regRLC_MEM_SLP_CNTL); + def = data = RREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL); data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; if (def != data) - WREG32_SOC15(GC, 0, regRLC_MEM_SLP_CNTL, data); + WREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL, data); } /* 3 - CP memory Light sleep */ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { - def = data = RREG32_SOC15(GC, 0, regCP_MEM_SLP_CNTL); + def = data = RREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL); data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; if (def != data) - WREG32_SOC15(GC, 0, regCP_MEM_SLP_CNTL, data); + WREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL, data); } } } 
else { /* 1 - MGCG_OVERRIDE */ - def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE); data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | @@ -2116,20 +2167,20 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); if (def != data) - WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); + WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data); /* 2 - disable MGLS in RLC */ - data = RREG32_SOC15(GC, 0, regRLC_MEM_SLP_CNTL); + data = RREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; - WREG32_SOC15(GC, 0, regRLC_MEM_SLP_CNTL, data); + WREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL, data); } /* 3 - disable MGLS in CP */ - data = RREG32_SOC15(GC, 0, regCP_MEM_SLP_CNTL); + data = RREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL); if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - WREG32_SOC15(GC, 0, regCP_MEM_SLP_CNTL, data); + WREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL, data); } } @@ -2137,14 +2188,14 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad } static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *adev, - bool enable) + bool enable, int xcc_id) { uint32_t def, data; amdgpu_gfx_rlc_enter_safe_mode(adev); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { - def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE); /* unset CGCG override */ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) @@ -2153,10 +2204,10 @@ static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *ad data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; /* update CGCG and CGLS override bits */ if (def != data) - WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); + WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data); /* enable cgcg FSM(0x0000363F) */ - def = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); + def = RREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL); if (adev->asic_type == CHIP_ARCTURUS) data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | @@ -2168,43 +2219,43 @@ static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *ad data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; if (def != data) - WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); + WREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL, data); /* set IDLE_POLL_COUNT(0x00900100) */ - def = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); + def = RREG32_SOC15(GC, xcc_id, regCP_RB_WPTR_POLL_CNTL); data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); if (def != data) - WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); + WREG32_SOC15(GC, xcc_id, regCP_RB_WPTR_POLL_CNTL, data); } else { - def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); + def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL); /* reset CGCG/CGLS bits */ data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); /* disable cgcg and cgls in FSM */ if (def != data) - WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); + 
WREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL, data); } amdgpu_gfx_rlc_exit_safe_mode(adev); } static int gfx_v9_4_3_update_gfx_clock_gating(struct amdgpu_device *adev, - bool enable) + bool enable, int xcc_id) { if (enable) { /* CGCG/CGLS should be enabled after MGCG/MGLS * === MGCG + MGLS === */ - gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable); + gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable, xcc_id); /* === CGCG + CGLS === */ - gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable); + gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable, xcc_id); } else { /* CGCG/CGLS should be disabled before MGCG/MGLS * === CGCG + CGLS === */ - gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable); + gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable, xcc_id); /* === MGCG + MGLS === */ - gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable); + gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable, xcc_id); } return 0; } @@ -2234,14 +2285,16 @@ static int gfx_v9_4_3_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; if (amdgpu_sriov_vf(adev)) return 0; switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 3): - gfx_v9_4_3_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE); + for (i = 0; i < adev->gfx.num_xcd; i++) + gfx_v9_4_3_update_gfx_clock_gating(adev, + state == AMD_CG_STATE_GATE, i); break; default: break; @@ -2509,8 +2562,9 @@ static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, } static void gfx_v9_4_3_set_compute_eop_interrupt_state(struct amdgpu_device *adev, - int me, int pipe, - enum amdgpu_interrupt_state state) + int me, int pipe, + enum amdgpu_interrupt_state state, + int xcc_id) { u32 mec_int_cntl, mec_int_cntl_reg; @@ -2523,16 +2577,16 @@ static void gfx_v9_4_3_set_compute_eop_interrupt_state(struct amdgpu_device *ade if (me == 1) { switch (pipe) { case 0: - mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE0_INT_CNTL); break; case 1: - mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE1_INT_CNTL); break; case 2: - mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE2_INT_CNTL); break; case 3: - mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE3_INT_CNTL); break; default: DRM_DEBUG("invalid pipe %d\n", pipe); @@ -2566,12 +2620,15 @@ static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { + int i; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.num_xcd; i++) + WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); break; default: break; @@ -2585,12 +2642,15 @@ static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { + int i; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.num_xcd; i++) + WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, + PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); break; default: break; @@ -2604,34 +2664,38 @@ static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - switch (type) { - case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state); - break; - case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 1, state); - break; - case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 2, state); - break; - case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 3, state); - break; - case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 0, state); - break; - case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 1, state); - break; - case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 2, state); - break; - case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 3, state); - break; - default: - break; + int i; + for (i = 0; i < adev->gfx.num_xcd; i++) { + switch (type) { + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state, i); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 1, state, i); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 2, state, i); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 3, state, i); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 0, state, i); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 1, state, i); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 2, state, i); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: + gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 3, state, i); + break; + default: + break; + } } + return 0; } @@ -2871,12 +2935,15 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) { - int i; + int i, j; - adev->gfx.kiq[0].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; + for (i = 0; i < adev->gfx.num_xcd; i++) { + adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].funcs = &gfx_v9_4_3_ring_funcs_compute; + for (j = 0; j < adev->gfx.num_compute_rings; j++) + adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs + = &gfx_v9_4_3_ring_funcs_compute; + } } static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = { From ec08571aca7c5e6bf2d1820db9c8aaa104eb9b68 Mon Sep 
17 00:00:00 2001 From: Le Ma Date: Wed, 17 Nov 2021 17:51:17 +0800 Subject: [PATCH 054/861] drm/amdgpu: add xcc index argument to gfx v9_4_3 functions Change those v9_4_3 interfaces which are exposed in gfx_v9_0.c. For some still-active single-XCC emulation models, it is better to keep the code path in gfx_v9_0.c reserved for a while. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 56999bb7ac26..f1c04140e717 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1068,13 +1068,13 @@ static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev, int xcc_id) } } -void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev) +void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; - data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); + data = RREG32_SOC15(GC, xcc_id, regCPC_PSP_DEBUG); data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK; - WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); + WREG32_SOC15(GC, xcc_id, regCPC_PSP_DEBUG, data); } static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) @@ -1177,19 +1177,19 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, - bool enable) + bool enable, int xcc_id) { u32 tmp; /* These interrupts should be enabled to drive DS clock */ - tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0); + tmp = RREG32_SOC15(GC, xcc_id, regCP_INT_CNTL_RING0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ?
1 : 0); - WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); + WREG32_SOC15(GC, xcc_id, regCP_INT_CNTL_RING0, tmp); } static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) @@ -1198,7 +1198,7 @@ static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) for (i = 0; i < adev->gfx.num_xcd; i++) { WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 0); - gfx_v9_4_3_enable_gui_idle_interrupt(adev, false); + gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); gfx_v9_4_3_wait_for_rlc_serdes(adev, i); } } @@ -1228,7 +1228,7 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) /* carrizo do enable cp interrupt after cp inited */ if (!(adev->flags & AMD_IS_APU)) { - gfx_v9_4_3_enable_gui_idle_interrupt(adev, true); + gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i); udelay(50); } @@ -1865,10 +1865,10 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) struct amdgpu_ring *ring; for (i = 0; i < adev->gfx.num_xcd; i++) { - gfx_v9_4_3_enable_gui_idle_interrupt(adev, false); + gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - gfx_v9_4_3_disable_gpa_mode(adev); + gfx_v9_4_3_disable_gpa_mode(adev, i); r = gfx_v9_4_3_cp_compute_load_microcode(adev, i); if (r) @@ -1888,7 +1888,7 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) amdgpu_ring_test_helper(ring); } - gfx_v9_4_3_enable_gui_idle_interrupt(adev, true); + gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i); } return 0; From d51ac6d0a23caf1005cb640f8533161c5d2dd0c0 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 24 May 2022 11:22:49 +0800 Subject: [PATCH 055/861] drm/amdgpu: add xcc index argument to select_sh_se function v2 v1: To support multiple XCD case (Le) v2: introduce xcc index to gfx_v11_0_select_sh_se (Hawking) Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 16 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 +-- drivers/gpu/drm/amd/amdgpu/cik.c | 4 +-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 16 ++++----- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 ++--- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 24 +++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 33 ++++++++++--------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 33 ++++++++++--------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++--------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 20 ++++++----- drivers/gpu/drm/amd/amdgpu/nv.c | 4 +-- drivers/gpu/drm/amd/amdgpu/si.c | 4 +-- drivers/gpu/drm/amd/amdgpu/soc15.c | 4 +-- drivers/gpu/drm/amd/amdgpu/soc21.c | 4 +-- drivers/gpu/drm/amd/amdgpu/vi.c | 4 +-- 18 files changed, 110 insertions(+), 106 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index ebb35633058c..ae06d1f2af93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -787,7 +787,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, for (se_idx = 0; se_idx < se_cnt; se_idx++) { for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { - amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, 0); queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS); /* @@ -820,7 +820,7 @@ void 
kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); soc15_grbm_select(adev, 0, 0, 0, 0, 0); unlock_spi_csq_mutexes(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index f60753f97ac5..cc64ae550bc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -136,7 +136,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, } mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se_bank, - sh_bank, instance_bank); + sh_bank, instance_bank, 0); } else if (use_ring) { mutex_lock(&adev->srbm_mutex); amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid); @@ -169,7 +169,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, end: if (use_bank) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } else if (use_ring) { amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); @@ -263,7 +263,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, rd->id.grbm.sh, - rd->id.grbm.instance); + rd->id.grbm.instance, 0); } if (rd->id.use_srbm) { @@ -295,7 +295,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } end: if (rd->id.use_grbm) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -907,13 +907,13 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); + amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); x = 0; if (adev->gfx.funcs->read_wave_data) adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -1001,7 +1001,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); + amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); if (bank == 0) { if (adev->gfx.funcs->read_wave_vgprs) @@ -1011,7 +1011,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); } - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index a9e41d7970ea..77e2f714e357 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -237,7 +237,7 @@ struct amdgpu_gfx_funcs { /* get the gpu clock counter */ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, - u32 
sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, @@ -386,7 +386,7 @@ struct amdgpu_gfx { }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) -#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) +#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index de6d10390ab2..5641cf05d856 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1141,12 +1141,12 @@ static uint32_t cik_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index accc0a7251b9..323f5b8927ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3490,7 +3490,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev); @@ -4712,7 +4712,7 @@ static int gfx_v10_0_sw_fini(void *handle) } static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -4772,13 +4772,13 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev) (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) && ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v10_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -4907,7 +4907,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + 
gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev); /* * Set corresponding TCP bits for the inactive WGPs in @@ -4940,7 +4940,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -9540,7 +9540,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh( adev, disable_masks[i * 2 + j]); @@ -9561,7 +9561,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 4fbefe236fc7..50d0ff9ca259 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -112,7 +112,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); @@ -1484,7 +1484,7 @@ static int gfx_v11_0_sw_fini(void *handle) } static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -6473,7 +6473,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; counter = 0; - gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 8 && j < 2) gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh( adev, disable_masks[i * 2 + j]); @@ -6505,7 +6505,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, active_cu_number += counter; } } - gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d9ce4d1c50e4..7cb72bf1acdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1285,7 +1285,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) } static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -1438,12 +1438,12 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset on SI */ - gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); } /* GRBM_GFX_INDEX has 
a different offset on SI */ - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) @@ -1459,14 +1459,14 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v6_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -1487,7 +1487,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -1496,7 +1496,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG); } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1535,7 +1535,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = RREG32(mmSPI_STATIC_THREAD_MGMT_3); active_cu = gfx_v6_0_get_cu_enabled(adev); @@ -1550,7 +1550,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) } } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -2391,7 +2391,7 @@ static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable) WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 
1 : 0); if (!enable) { - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmSPI_LB_CU_MASK, 0x00ff); } } @@ -3571,7 +3571,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v6_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -3593,7 +3593,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 46740ad9a80f..d055e44eee1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1552,7 +1552,8 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * Select which SE, SH combinations to address. */ static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, - u32 se_num, u32 sh_num, u32 instance) + u32 se_num, u32 sh_num, u32 instance, + int xcc_id) { u32 data; @@ -1732,13 +1733,13 @@ gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset on CI+ */ - gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); } /* GRBM_GFX_INDEX has a different offset on CI+ */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } /** @@ -1761,13 +1762,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v7_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -1790,7 +1791,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -1801,7 +1802,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG_1); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1911,7 +1912,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the 
shaders */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -3301,7 +3302,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3309,7 +3310,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3474,7 +3475,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); WREG32(mmRLC_LB_PARAMS, 0x00600408); WREG32(mmRLC_LB_CNTL, 0x80000004); @@ -3530,7 +3531,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3584,7 +3585,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3635,7 +3636,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; @@ -5115,7 +5116,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v7_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -5136,7 +5137,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 
8a43e87de49f..b60480876149 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3395,7 +3395,8 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, - u32 se_num, u32 sh_num, u32 instance) + u32 se_num, u32 sh_num, u32 instance, + int xcc_id) { u32 data; @@ -3579,13 +3580,13 @@ gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset on VI */ - gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); } /* GRBM_GFX_INDEX has a different offset on VI */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) @@ -3601,13 +3602,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v8_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -3630,7 +3631,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -3641,7 +3642,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG_1); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -3788,7 +3789,7 @@ static void gfx_v8_0_constants_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_FIFO_SIZE, (adev->gfx.config.sc_prim_fifo_size_frontend << @@ -3819,7 +3820,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3827,7 +3828,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } if (k == adev->usec_timeout) { gfx_v8_0_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, 0); 
mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -3835,7 +3836,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -5481,7 +5482,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, { uint32_t data; - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); @@ -6723,11 +6724,11 @@ static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data, */ if (from_wq) { mutex_lock(&adev->grbm_idx_mutex); - gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id); + gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0); sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE); - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -7116,7 +7117,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v8_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -7137,7 +7138,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4939fd61355b..2fa7adef18a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1504,7 +1504,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) mask = 1; cu_bitmap = 0; counter = 0; - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { if (cu_info->bitmap[i][j] & mask) { @@ -1523,7 +1523,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = cu_bitmap; } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1545,7 +1545,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); /* set mmRLC_LB_PARAMS = 0x003F_1006 */ @@ -1594,7 +1594,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 
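/*
 * (Editor's sketch, not part of the patch.) The idiom these hunks keep
 * repeating: hold grbm_idx_mutex, narrow GRBM_GFX_INDEX to one se/sh
 * (0xffffffff broadcasts to all), perform the access, then restore
 * broadcast mode before unlocking -- now with an explicit xcc_id on
 * every select:
 *
 *	mutex_lock(&adev->grbm_idx_mutex);
 *	amdgpu_gfx_select_se_sh(adev, se, sh, 0xffffffff, xcc_id);
 *	data = RREG32(reg);
 *	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff,
 *				0xffffffff, xcc_id);
 *	mutex_unlock(&adev->grbm_idx_mutex);
 */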
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); /* set mmRLC_LB_PARAMS = 0x003F_1006 */ @@ -2241,7 +2241,7 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) } void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance) + u32 instance, int xcc_id) { u32 data; @@ -2290,13 +2290,13 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v9_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -2433,7 +2433,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -2441,7 +2441,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } if (k == adev->usec_timeout) { amdgpu_gfx_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -2449,7 +2449,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -6608,7 +6608,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { - amdgpu_gfx_select_se_sh(adev, j, 0x0, k); + amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0); RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); } } @@ -6670,7 +6670,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { - amdgpu_gfx_select_se_sh(adev, j, 0, k); + amdgpu_gfx_select_se_sh(adev, j, 0, k, 0); reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); if (reg_value) @@ -6685,7 +6685,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count += sec_count; err_data->ue_count += ded_count; - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); gfx_v9_0_query_utc_edc_status(adev, err_data); @@ -7145,7 +7145,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - 
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); gfx_v9_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); bitmap = gfx_v9_0_get_cu_active_bitmap(adev); @@ -7178,7 +7178,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h index dfe8d4841f58..f9f6edc5e558 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h @@ -27,6 +27,6 @@ extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block; void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance); + u32 instance, int xcc_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 93438770ca1a..d648a29c33e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -777,7 +777,7 @@ void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev) { u32 tmp; - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); tmp = 0; tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index f1c04140e717..b67be666f38a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -568,7 +568,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance) + u32 instance, + int xcc_id) { u32 data; @@ -591,7 +592,7 @@ static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data); + WREG32_SOC15_RLC_SHADOW_EX(reg, GC, xcc_id, regGRBM_GFX_INDEX, data); } static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) @@ -907,13 +908,13 @@ static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev, int xcc_id) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); data = gfx_v9_4_3_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -1146,7 +1147,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); for (k = 0; k < adev->usec_timeout; k++) { if 
(RREG32_SOC15(GC, 0, regRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -1154,7 +1155,8 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } if (k == adev->usec_timeout) { gfx_v9_4_3_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, + xcc_id); mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -1162,7 +1164,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3065,7 +3067,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, 0); gfx_v9_4_3_set_user_cu_inactive_bitmap( adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev); @@ -3098,7 +3100,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 47420b403871..148049782f50 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -371,12 +371,12 @@ static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 7f99e130acd0..f64b87b11b1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1181,12 +1181,12 @@ static uint32_t si_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4b79a8933476..4d1487a9836c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -363,12 +363,12 @@ static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_n mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); 
if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 514bfc705d5a..6ef4be9322d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -288,12 +288,12 @@ static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_n mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 531f173ade2d..8e70581960fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -762,12 +762,12 @@ static uint32_t vi_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { From 86b20703e4c5a3c39891def0a68e7438aeca9db9 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 27 Jul 2022 14:24:05 +0800 Subject: [PATCH 056/861] drm/amdgpu: add xcc index argument to rlc safe_mode func (v4) v1: To support multiple XCD case (Le) v2: unify naming style (Le) v3: apply the changes to gc v11_0 (Hawking) v4: apply the changes to gc SOC21 (Morris) Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 16 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 10 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 12 +++---- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 20 +++++------ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 4 +-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 20 +++++------ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 20 +++++------ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 16 ++++----- drivers/gpu/drm/amd/amdgpu/nv.c | 4 +-- drivers/gpu/drm/amd/amdgpu/soc21.c | 4 +-- drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 6 ++-- .../amd/pm/powerplay/hwmgr/smu7_powertune.c | 12 +++---- .../amd/pm/powerplay/hwmgr/vega10_powertune.c | 36 +++++++++---------- 13 files changed, 90 insertions(+), 90 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index 85fb730d9fc8..d3bed9a3e61f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -34,9 +34,9 @@ * * Set RLC enter into safe mode if RLC is enabled and haven't in safe mode.
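 *
 * (Editor's illustration, not part of the patch: with the new xcc_id
 * argument, a typical caller brackets CG/PG register programming per
 * XCC instance, mirroring the call sites updated below --
 *
 *	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
 *	...program clock/power-gating registers for this XCC...
 *	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
 *
 * -- with single-XCC paths passing xcc_id 0.)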
*/ -void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev) +void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id) { - if (adev->gfx.rlc.in_safe_mode) + if (adev->gfx.rlc.in_safe_mode[xcc_id]) return; /* if RLC is not enabled, do nothing */ @@ -46,8 +46,8 @@ void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev) if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_3D_CGCG)) { - adev->gfx.rlc.funcs->set_safe_mode(adev); - adev->gfx.rlc.in_safe_mode = true; + adev->gfx.rlc.funcs->set_safe_mode(adev, xcc_id); + adev->gfx.rlc.in_safe_mode[xcc_id] = true; } } @@ -58,9 +58,9 @@ void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev) * * Set RLC exit safe mode if RLC is enabled and have entered into safe mode. */ -void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev) +void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id) { - if (!(adev->gfx.rlc.in_safe_mode)) + if (!(adev->gfx.rlc.in_safe_mode[xcc_id])) return; /* if RLC is not enabled, do nothing */ @@ -70,8 +70,8 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev) if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_3D_CGCG)) { - adev->gfx.rlc.funcs->unset_safe_mode(adev); - adev->gfx.rlc.in_safe_mode = false; + adev->gfx.rlc.funcs->unset_safe_mode(adev, xcc_id); + adev->gfx.rlc.in_safe_mode[xcc_id] = false; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index 23f060db9255..80b263646966 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -157,8 +157,8 @@ typedef struct _RLC_TABLE_OF_CONTENT { struct amdgpu_rlc_funcs { bool (*is_rlc_enabled)(struct amdgpu_device *adev); - void (*set_safe_mode)(struct amdgpu_device *adev); - void (*unset_safe_mode)(struct amdgpu_device *adev); + void (*set_safe_mode)(struct amdgpu_device *adev, int xcc_id); + void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id); int (*init)(struct amdgpu_device *adev); u32 (*get_csb_size)(struct amdgpu_device *adev); void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer); @@ -201,7 +201,7 @@ struct amdgpu_rlc { u32 cp_table_size; /* safe mode for updating CG/PG state */ - bool in_safe_mode; + bool in_safe_mode[8]; const struct amdgpu_rlc_funcs *funcs; /* for firmware data */ @@ -260,8 +260,8 @@ struct amdgpu_rlc { struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl; }; -void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev); -void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev); +void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id); +void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws); int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev); int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 323f5b8927ad..8bd07ff59671 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7571,7 +7571,7 @@ static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev) return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? 
true : false; } -static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev) +static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; unsigned i; @@ -7612,7 +7612,7 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev) +static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; @@ -7959,7 +7959,7 @@ static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_d static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, bool enable) { - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); if (enable) { /* enable FGCG firstly*/ @@ -7998,7 +7998,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, AMD_CG_SUPPORT_GFX_3D_CGLS)) gfx_v10_0_enable_gui_idle_interrupt(adev, enable); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } @@ -8092,11 +8092,11 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable) { - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); gfx_v10_cntl_power_gating(adev, enable); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 50d0ff9ca259..d3c89e6c0c03 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -123,8 +123,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, uint16_t pasid, uint32_t flush_type, bool all_hub, uint8_t dst_sel); -static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev); -static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev); +static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id); +static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, bool enable); @@ -4532,7 +4532,7 @@ static int gfx_v11_0_soft_reset(void *handle) tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0); WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); - gfx_v11_0_set_safe_mode(adev); + gfx_v11_0_set_safe_mode(adev, 0); for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { @@ -4632,7 +4632,7 @@ static int gfx_v11_0_soft_reset(void *handle) tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); - gfx_v11_0_unset_safe_mode(adev); + gfx_v11_0_unset_safe_mode(adev, 0); return gfx_v11_0_cp_resume(adev); } @@ -4798,7 +4798,7 @@ static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? 
true : false; } -static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev) +static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; unsigned i; @@ -4817,7 +4817,7 @@ static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev) +static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) { WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); } @@ -5045,7 +5045,7 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, bool enable) { - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); gfx_v11_0_update_coarse_grain_clock_gating(adev, enable); @@ -5065,7 +5065,7 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, AMD_CG_SUPPORT_GFX_3D_CGLS)) gfx_v11_0_enable_gui_idle_interrupt(adev, enable); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } @@ -5133,11 +5133,11 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable) static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable) { - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); gfx_v11_cntl_power_gating(adev, enable); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static int gfx_v11_0_set_powergating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d055e44eee1d..d56dda5fc588 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3362,7 +3362,7 @@ static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev) return true; } -static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { u32 tmp, i, mask; @@ -3384,7 +3384,7 @@ static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) { u32 tmp; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b60480876149..278416acf060 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4903,7 +4903,7 @@ static int gfx_v8_0_hw_fini(void *handle) pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; } - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); if (!gfx_v8_0_wait_for_idle(adev)) gfx_v8_0_cp_enable(adev, false); else @@ -4912,7 +4912,7 @@ static int gfx_v8_0_hw_fini(void *handle) adev->gfx.rlc.funcs->stop(adev); else pr_err("rlc is busy, skip halt rlc\n"); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } @@ -5377,7 +5377,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, AMD_PG_SUPPORT_RLC_SMU_HS | AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GFX_DMG)) - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: @@ -5431,7 +5431,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, AMD_PG_SUPPORT_RLC_SMU_HS | AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GFX_DMG)) - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } @@ -5536,7 
+5536,7 @@ static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev) return true; } -static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev) +static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; unsigned i; @@ -5563,7 +5563,7 @@ static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev) +static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; unsigned i; @@ -5622,7 +5622,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev { uint32_t temp, data; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { @@ -5718,7 +5718,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev gfx_v8_0_wait_for_rlc_serdes(adev); } - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, @@ -5728,7 +5728,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL); - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); @@ -5811,7 +5811,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev gfx_v8_0_wait_for_rlc_serdes(adev); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 2fa7adef18a9..bce6919d666a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4623,7 +4623,7 @@ static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev) return true; } -static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev) +static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; unsigned i; @@ -4640,7 +4640,7 @@ static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev) +static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; @@ -4651,7 +4651,7 @@ static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev) static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, bool enable) { - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { gfx_v9_0_enable_gfx_cg_power_gating(adev, true); @@ -4663,7 +4663,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); } - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, @@ -4690,7 +4690,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev { uint32_t data, def; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { @@ -4757,7 +4757,7 @@ static void 
gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev } } - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, @@ -4768,7 +4768,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, if (!adev->gfx.num_gfx_rings) return; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* Enable 3D CGCG/CGLS */ if (enable) { @@ -4812,7 +4812,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); } - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, @@ -4820,7 +4820,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev { uint32_t def, data; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); @@ -4864,7 +4864,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); } - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index b67be666f38a..baa10ee8ec69 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1090,14 +1090,14 @@ static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) return true; } -static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev) +static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; unsigned i; data = RLC_SAFE_MODE__CMD_MASK; data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); - WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); + WREG32_SOC15(GC, xcc_id, regRLC_SAFE_MODE, data); /* wait for RLC_SAFE_MODE */ for (i = 0; i < adev->usec_timeout; i++) { @@ -1107,12 +1107,12 @@ static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev) +static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; data = RLC_SAFE_MODE__CMD_MASK; - WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); + WREG32_SOC15(GC, xcc_id, regRLC_SAFE_MODE, data); } static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) @@ -2125,7 +2125,7 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad { uint32_t data, def; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { @@ -2186,7 +2186,7 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad } } - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); } static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *adev, @@ -2194,7 +2194,7 @@ static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *ad { uint32_t def, data; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { def = data = RREG32_SOC15(GC, xcc_id, 
regRLC_CGTT_MGCG_OVERRIDE); @@ -2238,7 +2238,7 @@ static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *ad WREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL, data); } - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); } static int gfx_v9_4_3_update_gfx_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 148049782f50..dabeeab2f2ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -629,9 +629,9 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev, bool enter) { if (enter) - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); else - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); if (adev->gfx.funcs->update_perfmon_mgcg) adev->gfx.funcs->update_perfmon_mgcg(adev, !enter); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 6ef4be9322d9..7d59303ca2f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -549,9 +549,9 @@ static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev, bool enter) { if (enter) - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); else - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); if (adev->gfx.funcs->update_perfmon_mgcg) adev->gfx.funcs->update_perfmon_mgcg(adev, !enter); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index f5e08b60f66e..36c831b280ed 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable) pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); if (enable) { ret = kv_program_pt_config_registers(adev, didt_config_kv); if (ret) { - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return ret; } } kv_do_enable_didt(adev, enable); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c index 32a5a00fd8ae..21be23ec3c79 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c @@ -973,7 +973,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_CAP(PHM_PlatformCaps_TDRamping) || PP_CAP(PHM_PlatformCaps_TCPRamping)) { - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); mutex_lock(&adev->grbm_idx_mutex); value = 0; value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); @@ -1048,13 +1048,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) } mutex_unlock(&adev->grbm_idx_mutex); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } return 0; error: mutex_unlock(&adev->grbm_idx_mutex); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return result; } @@ -1068,7 +1068,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) PP_CAP(PHM_PlatformCaps_TDRamping) || PP_CAP(PHM_PlatformCaps_TCPRamping)) { - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); result = smu7_enable_didt(hwmgr, false); 
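/*
 * (Editor's note, illustrative only.) The powerplay/hwmgr call sites
 * have no XCC topology to iterate, so this patch hard-codes instance 0
 * for them; only the gfx_v9_4_3 paths forward a real xcc_id. The
 * resulting bracket looks like:
 *
 *	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
 *	result = smu7_enable_didt(hwmgr, false);
 *	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
 */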
PP_ASSERT_WITH_CODE((result == 0), @@ -1081,12 +1081,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == result), "Failed to disable DPM DIDT.", goto error); } - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } return 0; error: - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return result; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c index 9757d47dd6b8..309a9d3bc1b7 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c @@ -915,7 +915,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) num_se = adev->gfx.config.max_shader_engines; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); mutex_lock(&adev->grbm_idx_mutex); for (count = 0; count < num_se; count++) { @@ -940,7 +940,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) vega10_didt_set_mask(hwmgr, true); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } @@ -949,11 +949,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); vega10_didt_set_mask(hwmgr, false); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } @@ -966,7 +966,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) num_se = adev->gfx.config.max_shader_engines; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); mutex_lock(&adev->grbm_idx_mutex); for (count = 0; count < num_se; count++) { @@ -985,7 +985,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) vega10_didt_set_mask(hwmgr, true); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); if (PP_CAP(PHM_PlatformCaps_GCEDC)) @@ -1002,11 +1002,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; uint32_t data; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); vega10_didt_set_mask(hwmgr, false); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; @@ -1027,7 +1027,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) num_se = adev->gfx.config.max_shader_engines; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); mutex_lock(&adev->grbm_idx_mutex); for (count = 0; count < num_se; count++) { @@ -1048,7 +1048,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) vega10_didt_set_mask(hwmgr, true); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } @@ -1057,11 +1057,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); vega10_didt_set_mask(hwmgr, false); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } @@ -1075,7 +1075,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) num_se = 
adev->gfx.config.max_shader_engines; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); @@ -1096,7 +1096,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) vega10_didt_set_mask(hwmgr, true); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); @@ -1116,11 +1116,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; uint32_t data; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); vega10_didt_set_mask(hwmgr, false); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; @@ -1138,7 +1138,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; int result; - amdgpu_gfx_rlc_enter_safe_mode(adev); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); mutex_lock(&adev->grbm_idx_mutex); WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); @@ -1151,7 +1151,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) vega10_didt_set_mask(hwmgr, false); - amdgpu_gfx_rlc_exit_safe_mode(adev); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return 0; } From 79b6e265d92092b49252f546e1a0f63ae8851f83 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 12:28:59 -0400 Subject: [PATCH 057/861] drm/amd/display: Fixes for dcn32_clk_mgr implementation [Why&How] Fix CLK MGR early initialization and add logging. Fixes: 265280b99822 ("drm/amd/display: add CLKMGR changes for DCN32/321") Reviewed-by: Leo Li Reviewed-by: Qingqing Zhuo Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 2b8a81b6d53b..eea103908b09 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -905,6 +905,8 @@ void dcn32_clk_mgr_construct( struct pp_smu_funcs *pp_smu, struct dccg *dccg) { + struct clk_log_info log_info = {0}; + clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn32_funcs; if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) { @@ -938,6 +940,7 @@ void dcn32_clk_mgr_construct( clk_mgr->base.clks.ref_dtbclk_khz = 268750; } + /* integer part is now VCO frequency in kHz */ clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr); @@ -945,6 +948,8 @@ void dcn32_clk_mgr_construct( if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 4300000; /* Updated as per HW docs */ + dcn32_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); + if (ctx->dc->debug.disable_dtb_ref_clk_switch && clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) { clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk; From 224d3df954c184826657bbacd2a562dc99478cb3 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Wed, 23 Feb 2022 10:23:25 -0500 Subject: [PATCH 058/861] drm/amd/display: Clear GPINT1 before taking DMCUB out of reset [Why] Workaround for DMCUB front door load [How] Clear GPINT 
after reset so its consistent Signed-off-by: Samson Tam Acked-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index a76da0131add..568a2702d5f7 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -116,10 +116,6 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) break; } - /* Clear the GPINT command manually so we don't reset again. */ - cmd.all = 0; - dmub->hw_funcs.set_gpint(dmub, cmd); - /* Force reset in case we timed out, DMCUB is likely hung. */ } @@ -131,6 +127,10 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); + + /* Clear the GPINT command manually so we don't reset again. */ + cmd.all = 0; + dmub->hw_funcs.set_gpint(dmub, cmd); } void dmub_dcn32_reset_release(struct dmub_srv *dmub) From 66daccde429611530db82605c197be01adadb389 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 1 Dec 2021 16:44:18 +0800 Subject: [PATCH 059/861] drm/amdgpu: add master/slave check in init phase Skip KCQ setup on slave xcc as there's no use case. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 59 +++++++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5 +++ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 10 +++-- 4 files changed, 50 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 7f5c60381103..c83fb4277233 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -489,16 +489,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) return -EINVAL; spin_lock(&kiq->ring_lock); - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * - adev->gfx.num_compute_rings)) { - spin_unlock(&adev->gfx.kiq[0].ring_lock); - return -ENOMEM; - } + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * + adev->gfx.num_compute_rings)) { + spin_unlock(&kiq->ring_lock); + return -ENOMEM; + } - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - j = i + xcc_id * adev->gfx.num_compute_rings; - kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], - RESET_QUEUES, 0, 0); + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; + kiq->pmf->kiq_unmap_queues(kiq_ring, + &adev->gfx.compute_ring[i], + RESET_QUEUES, 0, 0); + } } if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) @@ -549,22 +552,26 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); spin_lock(&kiq->ring_lock); - r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * - adev->gfx.num_compute_rings + - kiq->pmf->set_resources_size); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - spin_unlock(&adev->gfx.kiq[0].ring_lock); - return r; - } + /* No need to map kcq on the slave */ + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * + adev->gfx.num_compute_rings + + kiq->pmf->set_resources_size); + 
if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + spin_unlock(&adev->gfx.kiq[0].ring_lock); + return r; + } - if (adev->enable_mes) - queue_mask = ~0ULL; + if (adev->enable_mes) + queue_mask = ~0ULL; - kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - j = i + xcc_id * adev->gfx.num_compute_rings; - kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); + kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; + kiq->pmf->kiq_map_queues(kiq_ring, + &adev->gfx.compute_ring[i]); + } } r = amdgpu_ring_test_helper(kiq_ring); @@ -1078,3 +1085,9 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE); } } + +bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id) +{ + return !(xcc_id % (adev->gfx.num_xcc_per_xcp ? + adev->gfx.num_xcc_per_xcp : 1)); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 77e2f714e357..a2d311a4da5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -462,4 +462,6 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id) int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev); int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); + +bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 4ff348e10e4d..ef96ff2f4272 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -406,6 +406,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) else tmo = tmo_gfx; + /* skip ib test on the slave kcq */ + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE && + !amdgpu_gfx_is_master_xcc(adev, ring->xcc_id)) + continue; + r = amdgpu_ring_test_ib(ring, tmo); if (!r) { DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index baa10ee8ec69..572f84f487cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1885,9 +1885,13 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) if (r) return r; - for (j = 0; j < adev->gfx.num_compute_rings; j++) { - ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings]; - amdgpu_ring_test_helper(ring); + /* skip ring test on slave kcq */ + if (amdgpu_gfx_is_master_xcc(adev, i)) { + for (j = 0; j < adev->gfx.num_compute_rings; j++) { + ring = &adev->gfx.compute_ring[j + + i * adev->gfx.num_compute_rings]; + amdgpu_ring_test_helper(ring); + } } gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i); From 4e7f84ec068cec6a9a72fe0f558e0ae4cf765c51 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 16 Dec 2021 15:35:25 +0800 Subject: [PATCH 060/861] drm/amdgpu: initialize num_xcd to 1 for gfx v9_0 Assign value here as the num_xcd is referenced in some gfx9 common path. 
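For illustration only (a standalone C sketch, not part of this series), the master/slave split introduced above reduces to a modulo test over the number of XCCs per partition, so with num_xcd initialized to 1 every caller sees a single master:

        /* Mirrors amdgpu_gfx_is_master_xcc(): with num_xcc_per_xcp == 0
         * or 1, every XCC counts as a master; with num_xcc_per_xcp == 2,
         * xcc_ids 0, 2, 4, ... are masters and 1, 3, 5, ... are slaves
         * that skip KCQ mapping, ring tests and IB tests.
         */
        static bool is_master_xcc(int xcc_id, int num_xcc_per_xcp)
        {
                return !(xcc_id % (num_xcc_per_xcp ? num_xcc_per_xcp : 1));
        }
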
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index bce6919d666a..8fb027cf1bfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4543,6 +4543,7 @@ static int gfx_v9_0_early_init(void *handle) adev->gfx.num_gfx_rings = 0; else adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; + adev->gfx.num_xcd = 1; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_0_set_kiq_pm4_funcs(adev); From b0dc10428460ac2408cf5f82fc3562e9e57324e1 Mon Sep 17 00:00:00 2001 From: Cruise Hung Date: Fri, 13 May 2022 09:16:42 +0800 Subject: [PATCH 061/861] drm/amd/display: Reset OUTBOX0 r/w pointer on DMUB reset [Why & How] We missed resetting OUTBOX0 mailbox r/w pointer on DMUB reset. Fix it. Fixes: 6ecf9773a503 ("drm/amd/display: Fix DMUB outbox trace in S4 (#4465)") Signed-off-by: Cruise Hung Acked-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 568a2702d5f7..b45ac31ba555 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -126,6 +126,8 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_RPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); /* Clear the GPINT command manually so we don't reset again. */ From 01382501509871d0799bab6bd412c228486af5bf Mon Sep 17 00:00:00 2001 From: Longlong Yao Date: Thu, 13 Apr 2023 13:21:45 +0800 Subject: [PATCH 062/861] drm/amdgpu: fix calltrace warning in amddrm_buddy_fini The following call trace is observed when removing the amdgpu driver, which is caused by that BOs allocated for psp are not freed until removing. [61811.450562] RIP: 0010:amddrm_buddy_fini.cold+0x29/0x47 [amddrm_buddy] [61811.450577] Call Trace: [61811.450577] [61811.450579] amdgpu_vram_mgr_fini+0x135/0x1c0 [amdgpu] [61811.450728] amdgpu_ttm_fini+0x207/0x290 [amdgpu] [61811.450870] amdgpu_bo_fini+0x27/0xa0 [amdgpu] [61811.451012] gmc_v9_0_sw_fini+0x4a/0x60 [amdgpu] [61811.451166] amdgpu_device_fini_sw+0x117/0x520 [amdgpu] [61811.451306] amdgpu_driver_release_kms+0x16/0x30 [amdgpu] [61811.451447] devm_drm_dev_init_release+0x4d/0x80 [drm] [61811.451466] devm_action_release+0x15/0x20 [61811.451469] release_nodes+0x40/0xb0 [61811.451471] devres_release_all+0x9b/0xd0 [61811.451473] __device_release_driver+0x1bb/0x2a0 [61811.451476] driver_detach+0xf3/0x140 [61811.451479] bus_remove_driver+0x6c/0xf0 [61811.451481] driver_unregister+0x31/0x60 [61811.451483] pci_unregister_driver+0x40/0x90 [61811.451486] amdgpu_exit+0x15/0x447 [amdgpu] For smu v13_0_2, if the GPU supports xgmi, refer to commit f5c7e7797060 ("drm/amdgpu: Adjust removal control flow for smu v13_0_2"), it will run gpu recover in AMDGPU_RESET_FOR_DEVICE_REMOVE mode when removing, which makes all devices in hive list have hw reset but no resume except the basic ip blocks, then other ip blocks will not call .hw_fini according to ip_block.status.hw. 
Since psp_free_shared_bufs just includes some software operations, so move it to psp_sw_fini. Reviewed-by: Guchun Chen Reviewed-by: Feifei Xu Signed-off-by: Longlong Yao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 9d7e6e0e73ed..a496bf2fb199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -520,6 +520,8 @@ static int psp_sw_fini(void *handle) kfree(cmd); cmd = NULL; + psp_free_shared_bufs(psp); + if (psp->km_ring.ring_mem) amdgpu_bo_free_kernel(&adev->firmware.rbuf, &psp->km_ring.ring_mem_mc_addr, @@ -2655,8 +2657,6 @@ static int psp_hw_fini(void *handle) psp_ring_destroy(psp, PSP_RING_TYPE__KM); - psp_free_shared_bufs(psp); - return 0; } From 8eba72053c682d8ae652bed4a62546239e58390b Mon Sep 17 00:00:00 2001 From: Candice Li Date: Wed, 19 Apr 2023 17:28:19 +0800 Subject: [PATCH 063/861] drm/amdgpu: Drop pcie_bif ras check from fatal error handler Some ASICs support fatal error event but do not support pcie_bif ras. Signed-off-by: Candice Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 3ab8a88789c8..22f401fd1901 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1597,8 +1597,7 @@ static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) { /* Fatal error events are handled on host side */ - if (amdgpu_sriov_vf(adev) || - !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) + if (amdgpu_sriov_vf(adev)) return; if (adev->nbio.ras && From 5c65a4b8e65e3247f86ec5152da138500edcf50c Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 19 Apr 2023 14:55:03 +0530 Subject: [PATCH 064/861] drm/amd/amdgpu: Fix spaces in array indexing and indentations in amdgpu_kms.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following errors reported by checkpatch: ERROR: space prohibited before open square bracket '[' +#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type ERROR: code indent should use tabs where possible + query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;$ Cc: Christian König Cc: Alex Deucher Cc: Mario Limonciello Signed-off-by: Srinivasan Shanmugam Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1a2e342af1c0..a5bae7eb993a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1458,7 +1458,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) int ret, i; static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = { -#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type +#define TA_FW_NAME(type)[TA_FW_TYPE_PSP_##type] = #type TA_FW_NAME(XGMI), TA_FW_NAME(RAS), TA_FW_NAME(HDCP), @@ -1557,7 +1557,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) fw_info.feature, fw_info.ver); /* RLCV */ - query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV; + query_fw.fw_type = 
AMDGPU_INFO_FW_GFX_RLCV; ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); if (ret) return ret; From 47fc644f801e4414753a9b7e87ed41f991cd68c3 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 19 Apr 2023 16:12:45 +0530 Subject: [PATCH 065/861] drm/amd/amdgpu: Fix style errors in amdgpu_drv.c & amdgpu_device.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix following checkpatch style errors in amdgpu_drv.c & amdgpu_device.c ERROR: exactly one space required after that #ifdef ERROR: spaces required around that '+=' (ctx:WxV) ERROR: space required before the open brace '{' ERROR: spaces required around that '||' (ctx:VxE) ERROR: space prohibited before that close parenthesis ')' ERROR: space required before the open parenthesis '(' ERROR: space required before the open brace '{' ERROR: code indent should use tabs where possible Cc: Christian König Cc: Alex Deucher Cc: Mario Limonciello Signed-off-by: Srinivasan Shanmugam Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 65 +++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 2 files changed, 34 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9b1eaba85bbd..ac78caa7cba8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -998,7 +998,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, if (array_size % 3) return; - for (i = 0; i < array_size; i +=3) { + for (i = 0; i < array_size; i += 3) { reg = registers[i + 0]; and_mask = registers[i + 1]; or_mask = registers[i + 2]; @@ -1547,7 +1547,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", amdgpu_sched_jobs); amdgpu_sched_jobs = 4; - } else if (!is_power_of_2(amdgpu_sched_jobs)){ + } else if (!is_power_of_2(amdgpu_sched_jobs)) { dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", amdgpu_sched_jobs); amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); @@ -2759,8 +2759,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */ - if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)|| - adev->asic_type == CHIP_ALDEBARAN )) + if (amdgpu_passthrough(adev) && + ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) || + adev->asic_type == CHIP_ALDEBARAN)) amdgpu_dpm_handle_passthrough_sbr(adev, true); if (adev->gmc.xgmi.num_physical_nodes > 1) { @@ -3089,7 +3090,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) } adev->ip_blocks[i].status.hw = false; /* handle putting the SMC in the appropriate state */ - if(!amdgpu_sriov_vf(adev)){ + if (!amdgpu_sriov_vf(adev)) { if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); if (r) { @@ -4036,7 +4037,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* disable all interrupts */ amdgpu_irq_disable_all(adev); - if (adev->mode_info.mode_config_initialized){ + if (adev->mode_info.mode_config_initialized) { if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) drm_helper_force_disable_all(adev_to_drm(adev)); else @@ -4697,42 +4698,42 @@ 
disabled: int amdgpu_device_mode1_reset(struct amdgpu_device *adev) { - u32 i; - int ret = 0; + u32 i; + int ret = 0; - amdgpu_atombios_scratch_regs_engine_hung(adev, true); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); - dev_info(adev->dev, "GPU mode1 reset\n"); + dev_info(adev->dev, "GPU mode1 reset\n"); - /* disable BM */ - pci_clear_master(adev->pdev); + /* disable BM */ + pci_clear_master(adev->pdev); - amdgpu_device_cache_pci_state(adev->pdev); + amdgpu_device_cache_pci_state(adev->pdev); - if (amdgpu_dpm_is_mode1_reset_supported(adev)) { - dev_info(adev->dev, "GPU smu mode1 reset\n"); - ret = amdgpu_dpm_mode1_reset(adev); - } else { - dev_info(adev->dev, "GPU psp mode1 reset\n"); - ret = psp_gpu_reset(adev); - } + if (amdgpu_dpm_is_mode1_reset_supported(adev)) { + dev_info(adev->dev, "GPU smu mode1 reset\n"); + ret = amdgpu_dpm_mode1_reset(adev); + } else { + dev_info(adev->dev, "GPU psp mode1 reset\n"); + ret = psp_gpu_reset(adev); + } - if (ret) - dev_err(adev->dev, "GPU mode1 reset failed\n"); + if (ret) + dev_err(adev->dev, "GPU mode1 reset failed\n"); - amdgpu_device_load_pci_state(adev->pdev); + amdgpu_device_load_pci_state(adev->pdev); - /* wait for asic to come out of reset */ - for (i = 0; i < adev->usec_timeout; i++) { - u32 memsize = adev->nbio.funcs->get_memsize(adev); + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + u32 memsize = adev->nbio.funcs->get_memsize(adev); - if (memsize != 0xffffffff) - break; - udelay(1); - } + if (memsize != 0xffffffff) + break; + udelay(1); + } - amdgpu_atombios_scratch_regs_engine_hung(adev, false); - return ret; + amdgpu_atombios_scratch_regs_engine_hung(adev, false); + return ret; } int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b1ca1ab6d6ad..b400d598b75a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1660,7 +1660,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = { }; static const struct pci_device_id pciidlist[] = { -#ifdef CONFIG_DRM_AMDGPU_SI +#ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, From 47659738fbd2f06730635a487605002ea9b11f3d Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 19 Nov 2021 15:35:30 +0800 Subject: [PATCH 066/861] drm/amdgpu: allocate doorbell index for multi-die case Allocate different doorbell index for kiq/kcq rings on each die Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 9 ++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 5 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 8 +++++++- 5 files changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index 8fd11497faba..ffb75d23d2fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -83,6 +83,8 @@ struct amdgpu_doorbell_index { }; uint32_t first_non_cp; uint32_t last_non_cp; + uint32_t xcc1_kiq_start; + uint32_t xcc1_mec_ring0_start; uint32_t max_assignment; /* Per engine SDMA doorbell size in dword */ uint32_t sdma_doorbell_range; @@ -164,7 +166,12 @@ typedef 
enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0, AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7, - AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F, + /* kiq/kcq from second XCD. Max 8 XCDs */ + AMDGPU_VEGA20_DOORBELL_XCC1_KIQ_START = 0x190, + /* 8 compute rings per GC. Max to 0x1CE */ + AMDGPU_VEGA20_DOORBELL_XCC1_MEC_RING0_START = 0x197, + + AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x1CE, AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF } AMDGPU_VEGA20_DOORBELL_ASSIGNMENT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index c83fb4277233..465ad0b7cddb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -316,6 +316,11 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->doorbell_index = adev->doorbell_index.kiq; ring->xcc_id = xcc_id; ring->vm_hub = AMDGPU_GFXHUB_0; + if (xcc_id >= 1) + ring->doorbell_index = adev->doorbell_index.xcc1_kiq_start + + xcc_id - 1; + else + ring->doorbell_index = adev->doorbell_index.kiq; r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 1d5af50331e4..d58353c89e59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -270,6 +270,7 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev, entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); entry->timestamp_src = dw[2] >> 31; entry->pasid = dw[3] & 0xffff; + entry->node_id = (dw[3] >> 16) & 0xff; entry->pasid_src = dw[3] >> 31; entry->src_data[0] = dw[4]; entry->src_data[1] = dw[5]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index be243adf3e65..7a8e686bdd41 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -53,6 +53,7 @@ struct amdgpu_iv_entry { uint64_t timestamp; unsigned timestamp_src; unsigned pasid; + unsigned node_id; unsigned pasid_src; unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW]; const uint32_t *iv_entry; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 572f84f487cd..56a415e151d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -752,7 +752,13 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->ring_obj = NULL; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; + if (xcc_id >= 1) + ring->doorbell_index = + (adev->doorbell_index.xcc1_mec_ring0_start + + ring_id - adev->gfx.num_compute_rings) << 1; + else + ring->doorbell_index = + (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); ring->vm_hub = AMDGPU_GFXHUB_0; From 948ca54c424be395402624ca0e21ad5ddf77cb6a Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 19 Apr 2023 17:35:15 +0530 Subject: [PATCH 067/861] drm/amd/display: Remove unused variables in dcn21_hwseq.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the below compiler error: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn21/dcn21_hwseq.c:229:11: error: unused variable 'otg_inst' [-Werror,-Wunused-variable]         uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;                  ^ 
drivers/gpu/drm/amd/amdgpu/../display/dc/dcn21/dcn21_hwseq.c:226:20: error: unused variable 'cmd' [-Werror,-Wunused-variable]         union dmub_rb_cmd cmd; Cc: Aurabindo Pillai Cc: Jerry Zuo Cc: Yongqiang Sun Signed-off-by: Srinivasan Shanmugam Reviewed-by: Yongqiang Sun Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 55a464a39529..43463d08f21b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -223,10 +223,8 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { - union dmub_rb_cmd cmd; struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; if (dc->dc->res_pool->dmcu) { From 8fa33bd8d327ae2f9b602cd883f32efc4662bea0 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 15:48:48 -0400 Subject: [PATCH 068/861] drm/amd/display: Do not clear GPINT register when releasing DMUB from reset [Why & How] There's no need to clear GPINT register for DMUB when releasing it from reset. Fix that. Fixes: ac2e555e0a7f ("drm/amd/display: Add DMCUB source files and changes for DCN32/321") Reviewed-by: Leo Li Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index b45ac31ba555..a7d5607459ed 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -137,7 +137,6 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) void dmub_dcn32_reset_release(struct dmub_srv *dmub) { - REG_WRITE(DMCUB_GPINT_DATAIN1, 0); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0); REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF); REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); From 8b6a6aa5d6d2be6a0669a29deb89184aaa0bad65 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 15:59:45 -0400 Subject: [PATCH 069/861] drm/amd/display: Update bounding box values for DCN321 [Why&how] Update bounding box values as per hardware spec Fixes: 197485c69543 ("drm/amd/display: Create dcn321_fpu file") Acked-by: Leo Li Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/dml/dcn321/dcn321_fpu.c | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 57b9bd896678..342a1bcb4927 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -106,16 +106,16 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { .clock_limits = { { .state = 0, - .dcfclk_mhz = 1564.0, - .fabricclk_mhz = 400.0, - .dispclk_mhz = 2150.0, - .dppclk_mhz = 2150.0, + .dcfclk_mhz = 1434.0, + .fabricclk_mhz = 2250.0, + .dispclk_mhz = 1720.0, + .dppclk_mhz = 1720.0, .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, - .phyclk_d32_mhz = 625.0, + .phyclk_d32_mhz = 313.0, .socclk_mhz = 
1200.0, - .dscclk_mhz = 716.667, - .dram_speed_mts = 1600.0, + .dscclk_mhz = 573.333, + .dram_speed_mts = 16000.0, .dtbclk_mhz = 1564.0, }, }, @@ -125,14 +125,14 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { .sr_exit_z8_time_us = 285.0, .sr_enter_plus_exit_z8_time_us = 320, .writeback_latency_us = 12.0, - .round_trip_ping_latency_dcfclk_cycles = 263, + .round_trip_ping_latency_dcfclk_cycles = 207, .urgent_latency_pixel_data_only_us = 4, .urgent_latency_pixel_mixed_with_vm_data_us = 4, .urgent_latency_vm_data_only_us = 4, - .fclk_change_latency_us = 20, - .usr_retraining_latency_us = 2, - .smn_latency_us = 2, - .mall_allocated_for_dcn_mbytes = 64, + .fclk_change_latency_us = 7, + .usr_retraining_latency_us = 0, + .smn_latency_us = 0, + .mall_allocated_for_dcn_mbytes = 32, .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, From 7abac457ba75e3c6f6468036ad6f424bc56b2750 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 19 Apr 2023 18:40:00 +0530 Subject: [PATCH 070/861] drm/amd/amdgpu: Fix style issues in amdgpu_discovery.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix following checkpatch errors in amdgpu_discovery.c ERROR: space required after that ',' (ctx:VxV) ERROR: space required before the open parenthesis '(' ERROR: code indent should use tabs where possible Cc: Christian König Cc: Alex Deucher Cc: Mario Limonciello Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 0ecce0b92b82..0ba013275dc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -311,7 +311,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) { /* ignore the discovery binary from vram if discovery=2 in kernel module parameter */ if (amdgpu_discovery == 2) - dev_info(adev->dev,"force read ip discovery binary from file"); + dev_info(adev->dev, "force read ip discovery binary from file"); else dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n"); @@ -323,7 +323,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } /* check the ip discovery binary signature */ - if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n"); r = -EINVAL; goto out; @@ -529,8 +529,8 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, break; default: break; - } - } + } + } next_ip: ip_offset += struct_size(ip, base_address, ip->num_base_address); } From 92d1fe5954dc28c4e0260b730dd79d2acfdfa29f Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Thu, 6 Apr 2023 16:10:04 -0400 Subject: [PATCH 071/861] drm/amd/display: add support for low bpc [WHY&HOW] Low bpc timings are failing validation, port a patch to allow them to pass. 
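For reference, a standalone sketch (names and structure simplified from dml32_TruncToValidBPP(), illustration only) of the 4:4:4 non-DSC selection after this change; the validity check for an explicitly requested bpp is relaxed in the same spirit, now accepting any DesiredBPP at or below 24:

        /* Pick the highest supported non-DSC bpp for dm_444. */
        static double trunc_to_valid_bpp_444(double max_link_bpp)
        {
                if (max_link_bpp >= 36.0)
                        return 36.0;
                else if (max_link_bpp >= 30.0)
                        return 30.0;
                else if (max_link_bpp >= 24.0)
                        return 24.0;
                return -1.0;    /* BPP_INVALID */
        }
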
Signed-off-by: Dillon Varone Acked-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../dc/dml/dcn32/display_mode_vba_util_32.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index 61cc4904ade4..cad2bc3aea67 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1595,7 +1595,6 @@ double dml32_TruncToValidBPP( unsigned int NonDSCBPP0; unsigned int NonDSCBPP1; unsigned int NonDSCBPP2; - unsigned int NonDSCBPP3; if (Format == dm_420) { NonDSCBPP0 = 12; @@ -1604,10 +1603,9 @@ double dml32_TruncToValidBPP( MinDSCBPP = 6; MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16; } else if (Format == dm_444) { - NonDSCBPP0 = 18; - NonDSCBPP1 = 24; - NonDSCBPP2 = 30; - NonDSCBPP3 = 36; + NonDSCBPP0 = 24; + NonDSCBPP1 = 30; + NonDSCBPP2 = 36; MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { @@ -1661,9 +1659,7 @@ double dml32_TruncToValidBPP( else return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0; } else { - if (MaxLinkBPP >= NonDSCBPP3) - return NonDSCBPP3; - else if (MaxLinkBPP >= NonDSCBPP2) + if (MaxLinkBPP >= NonDSCBPP2) return NonDSCBPP2; else if (MaxLinkBPP >= NonDSCBPP1) return NonDSCBPP1; @@ -1674,7 +1670,7 @@ double dml32_TruncToValidBPP( } } else { if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 || - DesiredBPP == NonDSCBPP0 || DesiredBPP == NonDSCBPP3)) || + DesiredBPP <= NonDSCBPP0)) || (DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP))) return BPP_INVALID; else From a1f1fecd04f0b9ef600898c7f9b2094504127fd7 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 16:16:33 -0400 Subject: [PATCH 072/861] drm/amd/display: Set DRAM clock if retraining is required Set DRAM clock change state if retraining is required. Reviewed-by: Rodrigo Siqueira Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index cad2bc3aea67..d39e77d95fc3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -4338,7 +4338,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; } if (v->USRRetrainingRequiredFinal) - v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark + v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark + mmSOCParameters.USRRetrainingLatency; if (TotalActiveWriteback <= 1) { From 974ce18160bb16c8addf6889fff977564404b87b Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 16:19:16 -0400 Subject: [PATCH 073/861] drm/amd/display: Add check for PState change in DCN32 For pstate change, allow DML to loop through all possible prefetch combinations so as to support more display configurations. Set the max and min prefetch modes to enable the sequence. 
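A rough sketch of the sequence this enables (illustrative pseudo-code with assumed helper names, not the actual DML entry points): mode support walks every prefetch combination from the most demanding to the most relaxed until one validates.

        /* min/max come from dml32_CalculateMinAndMaxPrefetchMode();
         * for dm_prefetch_support_uclk_fclk_and_stutter_if_possible
         * the range is now 0..3 rather than a single forced mode.
         */
        for (mode = min_prefetch_mode; mode <= max_prefetch_mode; mode++) {
                if (mode_support_with_prefetch(ctx, mode))  /* assumed name */
                        break;
        }
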
Reviewed-by: Rodrigo Siqueira Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index d39e77d95fc3..a50e7f4dce42 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -4656,6 +4656,10 @@ void dml32_CalculateMinAndMaxPrefetchMode( } else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) { *MinPrefetchMode = 0; *MaxPrefetchMode = 0; + } else if (AllowForPStateChangeOrStutterInVBlankFinal == + dm_prefetch_support_uclk_fclk_and_stutter_if_possible) { + *MinPrefetchMode = 0; + *MaxPrefetchMode = 3; } else { *MinPrefetchMode = 0; *MaxPrefetchMode = 3; From 2c30f85551211a48f7be57499a6fcb4ad00875da Mon Sep 17 00:00:00 2001 From: Jasdeep Dhillon Date: Tue, 28 Feb 2023 11:46:31 -0500 Subject: [PATCH 074/861] drm/amd/display: Isolate remaining FPU code in DCN32 [Why] DCN32 resource contains code that uses FPU. [How] Moved code into DCN32 FPU Reviewed-by: Rodrigo Siqueira Signed-off-by: Jasdeep Dhillon Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 2 ++ 2 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 2f7723053042..b8a2518faecc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2881,3 +2881,9 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint } return vactive_found; } + +void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb) +{ + dc_assert_fp_enabled(); + dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index 9a0806a0e2ef..dcf512cd3072 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -80,4 +80,6 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req); +void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb); + #endif From 44243719117171e947317a1aca4625c317f7dcb0 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 30 Apr 2020 15:38:04 -0400 Subject: [PATCH 075/861] drm/amd/display: Limit nv21 dst_y Dst_y can become negative in extreme odm 4to1 cases. While not strictly invalid, this should be limited to 0 for rq/dlg/ttu calculation. 
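One detail worth spelling out (standalone illustration, not from the patch): the clamp only works because the local also changes from unsigned to signed; with an unsigned type a conceptually negative value wraps and can never compare below zero.

        unsigned int u = (unsigned int)-2;  /* wraps to 0xFFFFFFFE */
        int s = -2;

        if (u < 0)      /* never true for an unsigned type */
                u = 0;
        if (s < 0)      /* clamps as intended */
                s = 0;
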
Reviewed-by: Rodrigo Siqueira Signed-off-by: Dmytro Laktyushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index cd3cfcb2a2b0..0497a5d74a62 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -980,7 +980,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, unsigned int vstartup_start = 0; unsigned int dst_x_after_scaler = 0; - unsigned int dst_y_after_scaler = 0; + int dst_y_after_scaler = 0; double line_wait = 0; double dst_y_prefetch = 0; double dst_y_per_vm_vblank = 0; @@ -1171,6 +1171,8 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + if (dst_y_after_scaler < 0) + dst_y_after_scaler = 0; // do some adjustment on the dst_after scaler to account for odm combine mode dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n", From 05ffbdf4dbd55b9526535bddddf40dafdc2b27d4 Mon Sep 17 00:00:00 2001 From: Sherry Wang Date: Sun, 9 Oct 2022 17:05:51 +0800 Subject: [PATCH 076/861] drm/amd/display: correct DML calc error [Why] DML calculation is different from HW formula. [How] Correct the bug to keep it same as HW formula. Reviewed-by: Rodrigo Siqueira Signed-off-by: Sherry Wang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 4 ++-- .../gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 4 ++-- .../gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 7d0626e42ea6..dea2b84e5ebe 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -4939,8 +4939,8 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } v->TotImmediateFlipBytes = 0.0; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { - v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] - + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k]; + v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k]); } for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index bd674dc30df3..330b089d6a86 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -5274,8 +5274,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->TotImmediateFlipBytes = 0.0; for (k = 0; k < v->NumberOfActivePlanes; k++) { v->TotImmediateFlipBytes = v->TotImmediateFlipBytes - + v->NoOfDPP[i][j][k] * 
v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k] - + v->DPTEBytesPerRow[i][j][k]; + + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k] + + v->DPTEBytesPerRow[i][j][k]); } for (k = 0; k < v->NumberOfActivePlanes; k++) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 7eb2173b7691..27b83162ae45 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -5371,8 +5371,8 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ v->TotImmediateFlipBytes = 0.0; for (k = 0; k < v->NumberOfActivePlanes; k++) { v->TotImmediateFlipBytes = v->TotImmediateFlipBytes - + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k] - + v->DPTEBytesPerRow[i][j][k]; + + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k] + + v->DPTEBytesPerRow[i][j][k]); } for (k = 0; k < v->NumberOfActivePlanes; k++) { From 0244b0f7d5ac2b6a8d5716f6dd2fb2631f13a3b5 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 16:46:30 -0400 Subject: [PATCH 077/861] drm/amd/display: Add extra check for 444 16 format DCN30 is missing a check for the pixel format 444 when using 16bits before setting the flag that Viewport exceeds the surface. Reviewed-by: Rodrigo Siqueira Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index dea2b84e5ebe..9af1a43c042b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -5130,7 +5130,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l ViewportExceedsSurface = true; if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 - && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { + && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) { ViewportExceedsSurface = true; } From 72a9be2f444a9fd4ab34e6329fa1889dbd3a7ef3 Mon Sep 17 00:00:00 2001 From: Krunoslav Kovac Date: Fri, 11 Mar 2022 16:12:58 -0500 Subject: [PATCH 078/861] drm/amd/display: 3-plane MPO enablement for DCN321 Enable 3-planes MPO for DCN321 by reporting max_slave_planes in DC caps for each ASIC. 
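For illustration only, with assumed names rather than the actual validation path: the cap bounds how many MPO slave planes can be blended under one primary, so raising it to 2 is what permits the third plane.

        /* Hypothetical check; real validation lives in DC resource code. */
        if (num_slave_planes > dc->caps.max_slave_planes)
                return false;   /* reject the 3-plane overlay config */
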
Reviewed-by: Rodrigo Siqueira Signed-off-by: Krunoslav Kovac Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index cf21b240fc55..138657c38afe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -1727,9 +1727,9 @@ static bool dcn321_resource_construct( dc->caps.subvp_pstate_allow_width_us = 20; dc->caps.subvp_vertical_int_margin_us = 30; dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.dp_hpo = true; From 4335077a76095ff75dc0ffb031aeae93f9f5e80f Mon Sep 17 00:00:00 2001 From: Meenakshikumar Somasundaram Date: Wed, 12 Jan 2022 19:58:04 -0500 Subject: [PATCH 079/861] drm/amd/display: Adjust dmub outbox notification enable [Why] Currently driver enables dmub outbox notification before oubox ISR is registered. During boot scenario, sometimes dmub issues hpd outbox message before driver registers ISR and those messages are missed. [How] Enable dmub outbox notification after outbox ISR is registered. Also, restructured outbox enable code to call from dm layer and renamed APIs. Reviewed-by: Rodrigo Siqueira Signed-off-by: Meenakshikumar Somasundaram Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index e0c74868d2ee..890268d95495 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -197,10 +197,6 @@ void dcn31_init_hw(struct dc *dc) } } - /* Enables outbox notifications for usb4 dpia */ - if (dc->res_pool->usb4_dpia_count) - dmub_enable_outbox_notification(dc->ctx->dmub_srv); - /* we want to turn off all dp displays before doing detection */ dc->link_srv->blank_all_dp_displays(dc); From 3ac73f1ef2b403048c98fdf0f29ba32571efb849 Mon Sep 17 00:00:00 2001 From: Igor Kravchenko Date: Fri, 10 Jul 2020 16:24:30 -0400 Subject: [PATCH 080/861] drm/amd/display: Set min_width and min_height capability for DCN30 Add min_width, min_height fields to dc_plane_cap structure. Set values to 16x16 for discrete ASICs, and 64x64 for others. 
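The two bare values appended to the initializer correspond to the new fields named above; assuming min_width precedes min_height at the tail of struct dc_plane_cap, the positional form is equivalent to:

        static const struct dc_plane_cap plane_cap = {
                /* ... existing members ... */
                .min_width = 16,        /* 64 on non-discrete ASICs */
                .min_height = 16,
        };

Designated initializers would make the intent of the trailing 16, 16 self-documenting, at the cost of a slightly larger diff.
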
Reviewed-by: Rodrigo Siqueira Signed-off-by: Igor Kravchenko Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 965f5ceb33f7..67a34cda3774 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -701,7 +701,9 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = 167, .nv12 = 167, .fp16 = 167 - } + }, + 16, + 16 }; static const struct dc_debug_options debug_defaults_drv = { From 255ce8f7c169fa385ad04f01c7babab5f636a3ce Mon Sep 17 00:00:00 2001 From: Po-Ting Chen Date: Fri, 26 Feb 2021 15:48:02 +0800 Subject: [PATCH 081/861] drm/amd/display: update GSP1 generic info packet for PSRSU Base on PSRSU specification, every seletive update frame need to use two SDP to indicate the frame active range. So we occupy another GSP1 for PSRSU execution. Reviewed-by: Rodrigo Siqueira Signed-off-by: Po-Ting Chen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/dc/dcn30/dcn30_dio_stream_encoder.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 9d08127d209b..005dbe099a7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -436,6 +436,21 @@ void enc3_stream_encoder_update_dp_info_packets( &info_frame->vsc, true); } + /* TODO: VSC SDP at packetIndex 1 should be retricted only if PSR-SU on. + * There should have another Infopacket type (e.g. vsc_psrsu) for PSR_SU. + * In addition, currently the driver check the valid bit then update and + * send the corresponding Infopacket. For PSR-SU, the SDP only be sent + * while entering PSR-SU mode. So we need another parameter(e.g. send) + * in dc_info_packet to indicate which infopacket should be enabled by + * default here. + */ + if (info_frame->vsc.valid) { + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 1, /* packetIndex */ + &info_frame->vsc, + true); + } /* TODO: VSC SDP at packetIndex 1 should be restricted only if PSR-SU on. * There should have another Infopacket type (e.g. vsc_psrsu) for PSR_SU. * In addition, currently the driver check the valid bit then update and From 58e67bb3c131da5ee14e4842b08e53f4888dce0a Mon Sep 17 00:00:00 2001 From: Zhongwei Date: Fri, 28 Oct 2022 13:40:31 +0800 Subject: [PATCH 082/861] drm/amd/display: fix dpms_off issue when disabling bios mode [Why] disable_vbios_mode_if_required() will set dpms_off to false during boot when pixel clk dismatches with driver requires. This will cause extra backlight on and off if OS call 2 times setmode. [How] Set dpms_off to true to keep power_off and let OS control backlight by display's powerState. 
Reviewed-by: Rodrigo Siqueira Signed-off-by: Zhongwei Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 631c6b10562e..308b604cb791 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1268,7 +1268,7 @@ static void disable_vbios_mode_if_required( if (pix_clk_100hz != requested_pix_clk_100hz) { dc->link_srv->set_dpms_off(pipe); - pipe->stream->dpms_off = false; + pipe->stream->dpms_off = true; } } } From 710cc1e7cd461446a9325c9bd1e9a54daa462952 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 2 May 2019 13:21:48 -0400 Subject: [PATCH 083/861] drm/amd/display: Explicitly specify update type per plane info change [Why] The bit for flip addr is being set, causing the determination for FAST vs MEDIUM to always return MEDIUM when plane info is provided as a surface update. This causes extreme stuttering for the typical atomic update path on Linux. [How] Don't use update_flags->raw for determining FAST vs MEDIUM. It's too fragile against changes like this. Explicitly specify the update type per update flag instead. It's not as clever as checking the bits itself, but at least it's correct. Fixes: aa5fdb1ab5b6 ("drm/amd/display: Explicitly specify update type per plane info change") Reviewed-by: Rodrigo Siqueira Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 308b604cb791..c10243ef93b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2482,9 +2482,6 @@ static enum surface_update_type det_surface_update(const struct dc *dc, enum surface_update_type overall_type = UPDATE_TYPE_FAST; union surface_update_flags *update_flags = &u->surface->update_flags; - if (u->flip_addr) - update_flags->bits.addr_update = 1; - if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { update_flags->raw = 0xFFFFFFFF; return UPDATE_TYPE_FULL; From 4f63b7a59926eb7fb50091e796170a10a8ef4091 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 17:08:53 -0400 Subject: [PATCH 084/861] drm/amd/display: Add FAMS capability to DCN31 DCN31 supports FAMS, but this was not correctly set in the hardware setup sequence. This commit fixes the issue by setting the MCLK switch capability based on the feature capability retrieved from the DMUB.
Reviewed-by: Rodrigo Siqueira Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 890268d95495..55494730e500 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -295,6 +295,7 @@ void dcn31_init_hw(struct dc *dc) // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } void dcn31_dsc_pg_control( From f828b681d0cd566f86351c0b913e6cb6ed8c7b9c Mon Sep 17 00:00:00 2001 From: hackyzh002 Date: Wed, 19 Apr 2023 20:20:58 +0800 Subject: [PATCH 085/861] drm/radeon: Fix integer overflow in radeon_cs_parser_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The type of size is unsigned. If size is 0x40000000, then size *= sizeof(uint32_t) overflows to zero, which will cause uninitialized memory to be referenced later. Reviewed-by: Christian König Signed-off-by: hackyzh002 Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_cs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 46a27ebf4588..a6700d7278bf 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -270,7 +270,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) { struct drm_radeon_cs *cs = data; uint64_t *chunk_array_ptr; - unsigned size, i; + u64 size; + unsigned i; u32 ring = RADEON_CS_RING_GFX; s32 priority = 0; From 87c2213e85bd81e4a9a4d0880c256568794ae388 Mon Sep 17 00:00:00 2001 From: hackyzh002 Date: Wed, 19 Apr 2023 20:22:33 +0800 Subject: [PATCH 086/861] drm/amdgpu: Fix integer overflow in amdgpu_cs_pass1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The type of size is unsigned int. If size is 0x40000000, then size *= sizeof(uint32_t) overflows to zero, which will cause uninitialized memory to be referenced later.
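To make the arithmetic concrete, here is a minimal standalone C program (an illustrative sketch, not code from either driver) showing the 32-bit wrap both fixes guard against:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* same width as the old 'unsigned size' in the parsers */
	uint32_t size = 0x40000000;

	/* 0x40000000 * 4 == 0x100000000; the low 32 bits are all zero,
	 * so the value stored back into the 32-bit variable is 0.
	 */
	size *= sizeof(uint32_t);
	printf("32-bit size: %u\n", size); /* prints 0 */

	/* widening to 64 bits, as both patches do, keeps the product */
	uint64_t size64 = (uint64_t)0x40000000 * sizeof(uint32_t);
	printf("64-bit size: %llu\n", (unsigned long long)size64);
	return 0;
}

With the widened type, the later allocations and copies operate on the intended size instead of a zero-sized buffer.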
Reviewed-by: Christian König Signed-off-by: hackyzh002 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d8b3c9198d33..a8ec5ff41fc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -195,7 +195,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, uint64_t *chunk_array_user; uint64_t *chunk_array; uint32_t uf_offset = 0; - unsigned int size; + size_t size; int ret; int i; From 053065a43ca6466575d9d3c9110e305fdcf303d3 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 6 Apr 2023 15:01:32 -0400 Subject: [PATCH 087/861] drm/amd/display: Add FAMS related definitions and documentation for enum fields [Why&How] Add enum definitions and documentation related to FAMS (Firmware Assisted Memclk Switching) and CAB (Cache As Buffer) Reviewed-by: Qingqing Zhuo Reviewed-by: Leo Li Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 17 +++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 54b7786f5681..b32a5c977d17 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -376,6 +376,7 @@ enum dmub_fw_boot_status_bit { DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */ DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */ DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data is deferred programmed */ + DMUB_FW_BOOT_STATUS_BIT_FAMS_ENABLED = (1 << 5), /**< 1 if FAMS is enabled*/ DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/ DMUB_FW_BOOT_STATUS_BIT_HW_POWER_INIT_DONE = (1 << 7), /**< 1 if hw power init is completed */ }; @@ -989,16 +990,25 @@ struct dmub_rb_cmd_mall { }; /** - * enum dmub_cmd_cab_type - TODO: + * enum dmub_cmd_cab_type - CAB command data. */ enum dmub_cmd_cab_type { + /** + * No idle optimizations (i.e. no CAB) + */ DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION = 0, + /** + * No DCN requests for memory + */ DMUB_CMD__CAB_NO_DCN_REQ = 1, + /** + * Fit surfaces in CAB (i.e. CAB enable) + */ DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB = 2, }; /** - * struct dmub_rb_cmd_cab_for_ss - TODO: + * struct dmub_rb_cmd_cab - CAB command data.
*/ struct dmub_rb_cmd_cab_for_ss { struct dmub_cmd_header header; @@ -1006,6 +1016,9 @@ struct dmub_rb_cmd_cab_for_ss { uint8_t debug_bits; /* debug bits */ }; +/** + * Enum for indicating which MCLK switch mode per pipe + */ enum mclk_switch_mode { NONE = 0, FPO = 1, From 4371fa7795dabb422171de5e8d9fe7aa3e2edb86 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 2 Apr 2023 22:20:20 -0400 Subject: [PATCH 088/861] drm/amd/display: 3.2.231 This DC version brings along: - FW Release 0.0.162.0 - Enable FPO+Vactivate - Support for VESA SCR - Refactor DMUB commands - Fixes in secure display, modeset, memleak and more - Picked up missed patches in history Acked-by: Qingqing Zhuo Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index edef5d181590..b45974a2dec3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.230" +#define DC_VER "3.2.231" #define MAX_SURFACES 3 #define MAX_PLANES 6 From 0026c273e68ee82a7843f5de26147357bc7e6551 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 20 Apr 2023 10:47:01 +0200 Subject: [PATCH 089/861] drm/amd/display: dmub_abm_lcd: avoid missing-prototype warnings The dmub_abm_set_ambient_level() function has no caller and can just be removed; the other ones have a declaration in the header file and just need to see the prototype: drivers/gpu/drm/amd/amdgpu/../display/dc/dce/dmub_abm_lcd.c:122:14: error: no previous prototype for function 'dmub_abm_get_current_backlight' [-Werror,-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/../display/dc/dce/dmub_abm_lcd.c:133:14: error: no previous prototype for function 'dmub_abm_get_target_backlight' [-Werror,-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/../display/dc/dce/dmub_abm_lcd.c:144:6: error: no previous prototype for function 'dmub_abm_set_level' [-Werror,-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/../display/dc/dce/dmub_abm_lcd.c:163:6: error: no previous prototype for function 'dmub_abm_set_ambient_level' [-Werror,-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/../display/dc/dce/dmub_abm_lcd.c:183:6: error: no previous prototype for function 'dmub_abm_init_config' [-Werror,-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/../display/dc/dce/dmub_abm_lcd.c:213:6: error: no previous prototype for function 'dmub_abm_set_pause' [-Werror,-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/../display/dc/dce/dmub_abm_lcd.c:231:6: error: no previous prototype for function 'dmub_abm_set_pipe' [-Werror,-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/../display/dc/dce/dmub_abm_lcd.c:251:6: error: no previous prototype for function 'dmub_abm_set_backlight_level' [-Werror,-Wmissing-prototypes] Fixes: b8fe56375f78 ("drm/amd/display: Refactor ABM feature") Signed-off-by: Arnd Bergmann Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 22 +------------------ 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index e152c68edfd1..39da73eba86e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -24,6 +24,7 @@ */ #include "dmub_abm.h"
+#include "dmub_abm_lcd.h" #include "dce_abm.h" #include "dc.h" #include "dc_dmub_srv.h" @@ -159,27 +160,6 @@ bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask) return true; } -#ifndef TRIM_AMBIENT_GAMMA -void dmub_abm_set_ambient_level(struct abm *abm, unsigned int ambient_lux, uint8_t panel_mask) -{ - union dmub_rb_cmd cmd; - struct dc_context *dc = abm->ctx; - - if (ambient_lux > 0xFFFF) - ambient_lux = 0xFFFF; - - memset(&cmd, 0, sizeof(cmd)); - cmd.abm_set_ambient_level.header.type = DMUB_CMD__ABM; - cmd.abm_set_ambient_level.header.sub_type = DMUB_CMD__ABM_SET_AMBIENT_LEVEL; - cmd.abm_set_ambient_level.abm_set_ambient_level_data.ambient_lux = ambient_lux; - cmd.abm_set_ambient_level.abm_set_ambient_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; - cmd.abm_set_ambient_level.abm_set_ambient_level_data.panel_mask = panel_mask; - cmd.abm_set_ambient_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_ambient_level_data); - - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); -} -#endif - void dmub_abm_init_config(struct abm *abm, const char *src, unsigned int bytes, From 8f7f1b020ea641f21e7c88db55170a4d15834668 Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Fri, 14 Apr 2023 14:26:27 -0400 Subject: [PATCH 090/861] drm/amd/display: fix flickering caused by S/G mode Currently, on a handful of ASICs. We allow the framebuffer for a given plane to exist in either VRAM or GTT. However, if the plane's new framebuffer is in a different memory domain than it's previous framebuffer, flipping between them can cause the screen to flicker. So, to fix this, don't perform an immediate flip in the aforementioned case. Cc: stable@vger.kernel.org Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2354 Reviewed-by: Roman Li Fixes: 81d0bcf99009 ("drm/amdgpu: make display pinning more flexible (v2)") Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 73b52bf76461..8a3c04ff9bff 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7901,6 +7901,13 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); } +static inline uint32_t get_mem_type(struct drm_framebuffer *fb) +{ + struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); + + return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; +} + static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dc_state *dc_state, struct drm_device *dev, @@ -8043,11 +8050,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* * Only allow immediate flips for fast updates that don't - * change FB pitch, DCC state, rotation or mirroing. + * change memory domain, FB pitch, DCC state, rotation or + * mirroring. 
*/ bundle->flip_addrs[planes_count].flip_immediate = crtc->state->async_flip && - acrtc_state->update_type == UPDATE_TYPE_FAST; + acrtc_state->update_type == UPDATE_TYPE_FAST && + get_mem_type(old_plane_state->fb) == get_mem_type(fb); timestamp_ns = ktime_get_ns(); bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); From 89d8445e4f5cfaf5c017c0bafb05f8bbaac59bf9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 20 Mar 2023 12:22:49 -0400 Subject: [PATCH 091/861] drm/amdgpu/gfx11: add FW version check for new CP GFX shadow feature Use this to determine if we support the new SET_Q_PREEMPTION_MODE packet. Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 ++ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index a2d311a4da5a..6c5f58a34e84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -380,6 +380,8 @@ struct amdgpu_gfx { struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS]; struct amdgpu_ring_mux muxer; + bool cp_gfx_shadow; /* for gfx11 */ + enum amdgpu_gfx_partition partition_mode; uint32_t num_xcd; uint32_t num_xcc_per_xcp; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d3c89e6c0c03..f1ad6bd1eda8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -463,6 +463,18 @@ out: return err; } +static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + default: + adev->gfx.cp_gfx_shadow = false; + break; + } +} + static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) { char fw_name[40]; @@ -539,6 +551,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) /* only one MEC for gfx 11.0.0. */ adev->gfx.mec2_fw = NULL; + gfx_v11_0_check_fw_cp_gfx_shadow(adev); out: if (err) { amdgpu_ucode_release(&adev->gfx.pfp_fw); From 31d7c3a4fc3d312a0646990767647925d5bde540 Mon Sep 17 00:00:00 2001 From: Jack Xiao Date: Fri, 21 Apr 2023 14:20:38 +0800 Subject: [PATCH 092/861] drm/amdgpu: fix memory leak in mes self test The fences associated with the mes queue have to be freed up during amdgpu_ring_fini. Signed-off-by: Jack Xiao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index f676c236b657..eaf0f82757ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -390,6 +390,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, (void **)&ring->ring); + } else { + kfree(ring->fence_drv.fences); } dma_fence_put(ring->vmid_wait); From b418e7193965a5f064a0e422ccf25e6203f265b5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 20 Mar 2023 13:22:44 -0400 Subject: [PATCH 093/861] drm/amdgpu/gfx11: check the CP FW version for CP GFX shadow support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only set the supported flag if we have new enough CP FW.
v2: update to the final firmware versions Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f1ad6bd1eda8..2b63380ec75a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -469,6 +469,11 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): + if ((adev->gfx.me_fw_version >= 1505) && + (adev->gfx.pfp_fw_version >= 1600) && + (adev->gfx.mec_fw_version >= 512)) + adev->gfx.cp_gfx_shadow = true; + break; default: adev->gfx.cp_gfx_shadow = false; break; From 043dc33f443fd7abaf3fe076897503ce3d5dbc26 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Mar 2023 13:48:11 -0500 Subject: [PATCH 094/861] drm/amdgpu/UAPI: add new CS chunk for GFX shadow buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For GFX11, the UMD needs to allocate some shadow buffers to be used for preemption. The UMD allocates the buffers and passes the GPU virtual addresses to the kernel, since the kernel will program the packet that specifies these addresses as part of its IB submission frame. v2: UMD passes shadow init to tell kernel when to initialize the shadow Reviewed-by: Christian König Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 6981e59a9401..fc094653b13f 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -592,6 +592,7 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07 #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08 #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09 +#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a struct drm_amdgpu_cs_chunk { __u32 chunk_id; @@ -708,6 +709,15 @@ struct drm_amdgpu_cs_chunk_data { }; }; +#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1 + +struct drm_amdgpu_cs_chunk_cp_gfx_shadow { + __u64 shadow_va; + __u64 csa_va; + __u64 gds_va; + __u64 flags; +}; + /* * Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU * From ac9287055ff16a092416c76a19006764e4c6a978 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 9 Mar 2023 15:40:48 -0500 Subject: [PATCH 095/861] drm/amdgpu: add gfx shadow CS IOCTL support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for submitting the shadow update packet when submitting an IB. Needed for MCBP on GFX11.
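On the userspace side, a UMD would attach one of these chunks to its CS submission roughly as follows. This is a hypothetical helper, not from any UMD: only the chunk id, the flag, and the struct fields come from the UAPI additions above, and the chunk_id/length_dw/chunk_data usage follows the existing chunk ABI:

#include <stdint.h>
#include <string.h>
#include "amdgpu_drm.h"

/* Hypothetical UMD helper: shadow_va/csa_va/gds_va are GPU virtual
 * addresses the UMD allocated; first_use asks the kernel/FW to
 * initialize the shadow contents on the first submission.
 */
static void fill_shadow_chunk(struct drm_amdgpu_cs_chunk *chunk,
			      struct drm_amdgpu_cs_chunk_cp_gfx_shadow *data,
			      uint64_t shadow_va, uint64_t csa_va,
			      uint64_t gds_va, int first_use)
{
	memset(data, 0, sizeof(*data));
	data->shadow_va = shadow_va;
	data->csa_va = csa_va;
	data->gds_va = gds_va;
	data->flags = first_use ?
		AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW : 0;

	chunk->chunk_id = AMDGPU_CHUNK_ID_CP_GFX_SHADOW;
	chunk->length_dw = sizeof(*data) / 4;	/* 32 bytes -> 8 dwords */
	chunk->chunk_data = (uint64_t)(uintptr_t)data;
}

The kernel-side handling of this chunk is shown in the amdgpu_cs_p2_shadow() hunk below.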
v2: update API for CSA (Alex) v3: fix ordering; SET_Q_PREEMPTION_MODE must come before COND_EXEC Add missing check for AMDGPU_CHUNK_ID_CP_GFX_SHADOW in amdgpu_cs_pass1() Only initialize shadow on first use (Alex) v4: Pass parameters rather than job to new ring callback (Alex) v5: squash in change to call SET_Q_PREEMPTION_MODE/COND_EXEC before RELEASE_MEM to complete the UMD's use of the shadow (Alex) Reviewed-by: Christian König Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 26 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 29 +++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 6 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3 +++ 4 files changed, 63 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index a8ec5ff41fc0..c3b3287dc29e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -288,6 +288,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: + case AMDGPU_CHUNK_ID_CP_GFX_SHADOW: break; default: @@ -578,6 +579,26 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p, return 0; } +static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk) +{ + struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata; + int i; + + if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW) + return -EINVAL; + + for (i = 0; i < p->gang_size; ++i) { + p->jobs[i]->shadow_va = shadow->shadow_va; + p->jobs[i]->csa_va = shadow->csa_va; + p->jobs[i]->gds_va = shadow->gds_va; + p->jobs[i]->init_shadow = + shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW; + } + + return 0; +} + static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p) { unsigned int ce_preempt = 0, de_preempt = 0; @@ -620,6 +641,11 @@ static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p) if (r) return r; break; + case AMDGPU_CHUNK_ID_CP_GFX_SHADOW: + r = amdgpu_cs_p2_shadow(p, chunk); + if (r) + return r; + break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index ef96ff2f4272..aebc0e5bddc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -136,7 +136,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, uint64_t fence_ctx; uint32_t status = 0, alloc_size; unsigned fence_flags = 0; - bool secure; + bool secure, init_shadow; + u64 shadow_va, csa_va, gds_va; + int vmid = AMDGPU_JOB_GET_VMID(job); unsigned i; int r = 0; @@ -150,9 +152,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, vm = job->vm; fence_ctx = job->base.s_fence ?
job->base.s_fence->scheduled.context : 0; + shadow_va = job->shadow_va; + csa_va = job->csa_va; + gds_va = job->gds_va; + init_shadow = job->init_shadow; } else { vm = NULL; fence_ctx = 0; + shadow_va = 0; + csa_va = 0; + gds_va = 0; + init_shadow = false; } if (!ring->sched.ready && !ring->is_mes_queue) { @@ -212,6 +222,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } amdgpu_ring_ib_begin(ring); + + if (job && ring->funcs->emit_gfx_shadow) + amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va, + init_shadow, vmid); + if (job && ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); @@ -263,6 +278,18 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, fence_flags | AMDGPU_FENCE_FLAG_64BIT); } + if (ring->funcs->emit_gfx_shadow) { + amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0); + + if (ring->funcs->init_cond_exec) { + unsigned ce_offset = ~0; + + ce_offset = amdgpu_ring_init_cond_exec(ring); + if (ce_offset != ~0 && ring->funcs->patch_cond_exec) + amdgpu_ring_patch_cond_exec(ring, ce_offset); + } + } + r = amdgpu_fence_emit(ring, f, job, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 52f2e313ea17..3f9804f956c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -67,6 +67,12 @@ struct amdgpu_job { uint64_t uf_addr; uint64_t uf_sequence; + /* virtual addresses for shadow/GDS/CSA */ + uint64_t shadow_va; + uint64_t csa_va; + uint64_t gds_va; + bool init_shadow; + /* job_run_counter >= 1 means a resubmit job */ uint32_t job_run_counter; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 8eca6532ed19..2aa6cc1c4212 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -212,6 +212,8 @@ struct amdgpu_ring_funcs { void (*end_use)(struct amdgpu_ring *ring); void (*emit_switch_buffer) (struct amdgpu_ring *ring); void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); + void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va, + u64 gds_va, bool init_shadow, int vmid); void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t reg_val_offs); void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); @@ -309,6 +311,7 @@ struct amdgpu_ring { #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) +#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v))) #define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o)) #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) From 46c1282e5a31c33a6973affbcc765eac1f3f5d1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 8 Mar 2023 21:17:59 +0100 Subject: [PATCH 096/861] drm/amdgpu: add gfx11 emit shadow callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add ring callback for gfx to update the CP firmware with the new shadow information before we process the IB. 
v2: add implementation for new packet (Alex) v3: add current FW version checks (Alex) v4: only initialize shadow on first use Only set IB_VMID when a valid shadow buffer is present (Alex) v5: Pass parameters rather than job to new ring callback (Alex) Reviewed-by: Christian König Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 25 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/nvd.h | 5 ++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 2b63380ec75a..b8b2886aac92 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -5608,6 +5608,29 @@ static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, amdgpu_ring_write(ring, 0); } +static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring, + u64 shadow_va, u64 csa_va, + u64 gds_va, bool init_shadow, + int vmid) +{ + struct amdgpu_device *adev = ring->adev; + + if (!adev->gfx.cp_gfx_shadow) + return; + + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7)); + amdgpu_ring_write(ring, lower_32_bits(shadow_va)); + amdgpu_ring_write(ring, upper_32_bits(shadow_va)); + amdgpu_ring_write(ring, lower_32_bits(gds_va)); + amdgpu_ring_write(ring, upper_32_bits(gds_va)); + amdgpu_ring_write(ring, lower_32_bits(csa_va)); + amdgpu_ring_write(ring, upper_32_bits(csa_va)); + amdgpu_ring_write(ring, shadow_va ? + PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0); + amdgpu_ring_write(ring, init_shadow ? + PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0); +} + static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) { unsigned ret; @@ -6228,6 +6251,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .set_wptr = gfx_v11_0_ring_set_wptr_gfx, .emit_frame_size = /* totally 242 maximum if 16 IBs */ 5 + /* COND_EXEC */ + 9 + /* SET_Q_PREEMPTION_MODE */ 7 + /* PIPELINE_SYNC */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + @@ -6254,6 +6278,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl, + .emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow, .init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec, .patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec, .preempt_ib = gfx_v11_0_ring_preempt_ib, diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h index fd6b58243b03..631dafb92299 100644 --- a/drivers/gpu/drm/amd/amdgpu/nvd.h +++ b/drivers/gpu/drm/amd/amdgpu/nvd.h @@ -462,6 +462,9 @@ # define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25) #define PACKET3_RUN_LIST 0xA5 #define PACKET3_MAP_PROCESS_VM 0xA6 - +/* GFX11 */ +#define PACKET3_SET_Q_PREEMPTION_MODE 0xF0 +# define PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(x) ((x) << 0) +# define PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM (1 << 0) #endif From 38be7796f310cd2bc84dcc40c4fd1964df39a5b0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 16 Mar 2023 11:33:43 -0400 Subject: [PATCH 097/861] drm/amdgpu: don't require a job for cond_exec and shadow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to reset the shadow state every time we submit an IB and there needs to be a COND_EXEC packet after the SET_Q_PREEMPTION_MODE packet for it to work properly, so we should emit both of these packets regardless of whether 
there is a job present or not. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index aebc0e5bddc6..c955c3f060cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -223,11 +223,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, amdgpu_ring_ib_begin(ring); - if (job && ring->funcs->emit_gfx_shadow) + if (ring->funcs->emit_gfx_shadow) amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va, init_shadow, vmid); - if (job && ring->funcs->init_cond_exec) + if (ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); amdgpu_device_flush_hdp(adev, ring); From edd9038000352ba846cba9dfb84d8c397c3b6499 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Mar 2023 13:43:13 -0500 Subject: [PATCH 098/861] drm/amdgpu: add UAPI to query GFX shadow sizes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add UAPI to query the GFX shadow buffer requirements for preemption on GFX11. UMDs need to specify the shadow areas for preemption. v2: move into existing asic info query drop GDS as its use is determined by the UMD (Marek) v3: Update comments to note that alignment is base virtual alignment (Alex) Reviewed-by: Marek Olšák Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index fc094653b13f..cc78528c3b4b 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -1138,6 +1138,14 @@ struct drm_amdgpu_info_device { __u64 mall_size; /* AKA infinity cache */ /* high 32 bits of the rb pipes mask */ __u32 enabled_rb_pipes_mask_hi; + /* shadow area size for gfx11 */ + __u32 shadow_size; + /* shadow area base virtual alignment for gfx11 */ + __u32 shadow_alignment; + /* context save area size for gfx11 */ + __u32 csa_size; + /* context save area base virtual alignment for gfx11 */ + __u32 csa_alignment; }; struct drm_amdgpu_info_hw_ip { From 0db0c0379d15cd811214bdb631a0b6bdcdd22c84 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Mar 2023 14:56:07 -0500 Subject: [PATCH 099/861] drm/amdgpu: add gfx shadow callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To provide IP specific shadow sizes. UMDs will use this to query the kernel driver for the size of the shadow buffers. 
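As an illustration of how a UMD might consume the new dev_info fields, here is a sketch assuming libdrm's generic amdgpu_query_info() wrapper; error handling is abbreviated:

#include <stdio.h>
#include <string.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* 'dev' is an already-initialized amdgpu_device_handle. Per the
 * kernel side, the fields read back as 0 when the kernel/FW
 * combination has no CP GFX shadow support, so 0 doubles as the
 * "not supported" signal.
 */
static int query_shadow_sizes(amdgpu_device_handle dev)
{
	struct drm_amdgpu_info_device dev_info;
	int r;

	memset(&dev_info, 0, sizeof(dev_info));
	r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO,
			      sizeof(dev_info), &dev_info);
	if (r)
		return r;
	if (!dev_info.shadow_size)
		return -1; /* no shadow support */

	printf("shadow %u bytes (align %u), csa %u bytes (align %u)\n",
	       dev_info.shadow_size, dev_info.shadow_alignment,
	       dev_info.csa_size, dev_info.csa_alignment);
	return 0;
}

The UMD would then allocate its shadow and CSA buffers at GPU virtual addresses honoring the returned alignments and pass them in via the CS chunk added earlier in this series.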
v2: make callback return an int (Alex) v3: drop GDS (Alex) Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 6c5f58a34e84..f1a2ce7b6aca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -233,6 +233,13 @@ struct amdgpu_gfx_ras { struct amdgpu_iv_entry *entry); }; +struct amdgpu_gfx_shadow_info { + u32 shadow_size; + u32 shadow_alignment; + u32 csa_size; + u32 csa_alignment; +}; + struct amdgpu_gfx_funcs { /* get the gpu clock counter */ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); @@ -250,6 +257,8 @@ struct amdgpu_gfx_funcs { u32 queue, u32 vmid); void (*init_spm_golden)(struct amdgpu_device *adev); void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); + int (*get_gfx_shadow_info)(struct amdgpu_device *adev, + struct amdgpu_gfx_shadow_info *shadow_info); }; struct sq_work { @@ -391,6 +400,7 @@ struct amdgpu_gfx { #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) +#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si))) /** * amdgpu_gfx_create_bitmask - create a bitmask From 02527099ddc74244b9d94c93ec54e123fcee5899 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Mar 2023 15:10:14 -0500 Subject: [PATCH 100/861] drm/amdgpu: add get_gfx_shadow_info callback for gfx11 Used to get the size and alignment requirements for the gfx shadow buffer for preemption. 
v2: use FW version check to determine whether to return a valid size here; return an error if not supported (Alex) v3: drop GDS (Alex) v4: make amdgpu_gfx_shadow_info mandatory (Alex) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index b8b2886aac92..543af07ff102 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -818,6 +818,27 @@ static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, soc21_grbm_select(adev, me, pipe, q, vm); } +/* all sizes are in bytes */ +#define MQD_SHADOW_BASE_SIZE 73728 +#define MQD_SHADOW_BASE_ALIGNMENT 256 +#define MQD_FWWORKAREA_SIZE 484 +#define MQD_FWWORKAREA_ALIGNMENT 256 + +static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev, + struct amdgpu_gfx_shadow_info *shadow_info) +{ + if (adev->gfx.cp_gfx_shadow) { + shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE; + shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT; + shadow_info->csa_size = MQD_FWWORKAREA_SIZE; + shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT; + return 0; + } else { + memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info)); + return -ENOTSUPP; + } +} + static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = { .get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter, .select_se_sh = &gfx_v11_0_select_se_sh, @@ -826,6 +847,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = { .read_wave_vgprs = &gfx_v11_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q, .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk, + .get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info, }; static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) From 1ba91b54a9051205c2110ed43a7dc5650d49ca0e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Mar 2023 15:28:25 -0500 Subject: [PATCH 101/861] drm/amdgpu: add support for new GFX shadow size query Use the new callback to fetch the data. Return an error if not supported. UMDs should use this query to check whether shadow buffers are supported and if so what size they should be. v2: return an error rather than a zeroed structure. v3: drop GDS, move into dev_info structure. Data will be 0 if not supported. v4: drop local variable r Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a5bae7eb993a..1d3b224b8b28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -876,6 +876,19 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu; dev_info->mall_size = adev->gmc.mall_size; + + if (adev->gfx.funcs->get_gfx_shadow_info) { + struct amdgpu_gfx_shadow_info shadow_info; + + ret = amdgpu_gfx_get_gfx_shadow_info(adev, &shadow_info); + if (!ret) { + dev_info->shadow_size = shadow_info.shadow_size; + dev_info->shadow_alignment = shadow_info.shadow_alignment; + dev_info->csa_size = shadow_info.csa_size; + dev_info->csa_alignment = shadow_info.csa_alignment; + } + } + ret = copy_to_user(out, dev_info, min((size_t)size, sizeof(*dev_info))) ?
-EFAULT : 0; kfree(dev_info); From 550e5d23f14784e2a625c25fe0c9d498589c9256 Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Tue, 4 Apr 2023 13:44:18 -0400 Subject: [PATCH 102/861] drm/amd/display: assign edid_blob_ptr with edid from debugfs [Why] The implementation of drm_edid_override_set changed in Linux kernel 6.1: the edid from debugfs is saved into connector->edid_override immediately, and is no longer saved to connector->edid_blob_ptr at the same time. [How] Call the new drm_edid function drm_connector_update_edid_property to assign connector->edid_blob_ptr the override edid from debugfs. Tested-by: Daniel Wheeler Reviewed-by: Chao-kai Wang Acked-by: Rodrigo Siqueira Signed-off-by: Hersen Wu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8a3c04ff9bff..deb740466b19 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6373,11 +6373,20 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) struct edid *edid; if (!aconnector->base.edid_blob_ptr) { - DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", - aconnector->base.name); + /* if connector->edid_override valid, pass + * it to edid_override to edid_blob_ptr + */ + int count; - aconnector->base.force = DRM_FORCE_OFF; - return; + count = drm_edid_override_connector_update(&aconnector->base); + + if (!aconnector->base.edid_blob_ptr) { + DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", + aconnector->base.name); + + aconnector->base.force = DRM_FORCE_OFF; + return; + } } edid = (struct edid *) aconnector->base.edid_blob_ptr->data; From 9fa8cc0c444562fa19e20ca20f1c70e15b9d8c13 Mon Sep 17 00:00:00 2001 From: Michael Strauss Date: Mon, 3 Apr 2023 11:43:50 -0400 Subject: [PATCH 103/861] drm/amd/display: Convert Delaying Aux-I Disable To Monitor Patch [WHY] A 32ms delay was added to resolve an issue with a specific sink; however, this same delay also introduces erroneous link training failures with certain sink devices. [HOW] Only apply the 32ms delay for offending devices instead of globally.
Tested-by: Daniel Wheeler Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Michael Strauss Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 - drivers/gpu/drm/amd/display/dc/dc_types.h | 1 + .../link_dp_training_fixed_vs_pe_retimer.c | 17 +++++++++++------ 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index b45974a2dec3..97747f5fde56 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -855,7 +855,6 @@ struct dc_debug_options { bool force_usr_allow; /* uses value at boot and disables switch */ bool disable_dtb_ref_clk_switch; - uint32_t fixed_vs_aux_delay_config_wa; bool extended_blank_optimization; union aux_wake_wa_options aux_wake_wa; uint32_t mst_start_top_delay; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 34c848311455..150c19286d67 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -196,6 +196,7 @@ struct dc_panel_patch { unsigned int disable_fams; unsigned int skip_avmute; unsigned int mst_start_top_delay; + unsigned int delay_disable_aux_intercept_ms; }; struct dc_edid_caps { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 5731c4b61f9f..fb6c938c6dab 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -233,7 +233,8 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( link->dpcd_caps.lttpr_caps.phy_repeater_cnt); const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; + uint32_t pre_disable_intercept_delay_ms = + link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; uint32_t vendor_lttpr_write_address = 0xF004F; @@ -259,7 +260,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( /* Certain display and cable configuration require extra delay */ if (offset > 2) - pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2; } /* Vendor specific: Reset lane settings */ @@ -380,7 +381,8 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( 0); /* Vendor specific: Disable intercept */ for (i = 0; i < max_vendor_dpcd_retries; i++) { - msleep(pre_disable_intercept_delay_ms); + if (pre_disable_intercept_delay_ms != 0) + msleep(pre_disable_intercept_delay_ms); dpcd_status = core_link_write_dpcd( link, vendor_lttpr_write_address, @@ -591,9 +593,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E}; const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01}; const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = 
link->dc->debug.fixed_vs_aux_delay_config_wa; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + uint32_t pre_disable_intercept_delay_ms = + link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; + uint32_t vendor_lttpr_write_address = 0xF004F; enum link_training_result status = LINK_TRAINING_SUCCESS; @@ -618,7 +622,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( /* Certain display and cable configuration require extra delay */ if (offset > 2) - pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2; } /* Vendor specific: Reset lane settings */ @@ -739,7 +743,8 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( 0); /* Vendor specific: Disable intercept */ for (i = 0; i < max_vendor_dpcd_retries; i++) { - msleep(pre_disable_intercept_delay_ms); + if (pre_disable_intercept_delay_ms != 0) + msleep(pre_disable_intercept_delay_ms); dpcd_status = core_link_write_dpcd( link, vendor_lttpr_write_address, From 3af470cbcc9f40e47fe9b16882f60cd20b438095 Mon Sep 17 00:00:00 2001 From: Xiaogang Chen Date: Fri, 21 Apr 2023 13:35:01 -0500 Subject: [PATCH 104/861] drm/amdkfd: Fix an issue in the userptr buffer validation process. amdgpu_ttm_tt_get_user_pages can fail (-EFAULT). If it fails, the mem has no hmm range or user_pages associated with it. Keep it on process_info->userptr_inval_list and keep mem->invalid set until a following scheduled attempt can validate it. Signed-off-by: Xiaogang Chen Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 83a83ced2439..4432e169fae8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2445,7 +2445,9 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, ret = -EAGAIN; goto unlock_out; } - mem->invalid = 0; + /* set mem valid if mem has hmm range associated */ + if (mem->range) + mem->invalid = 0; } unlock_out: @@ -2577,8 +2579,15 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_inval_list, validate_list.head) { - bool valid = amdgpu_ttm_tt_get_user_pages_done( - mem->bo->tbo.ttm, mem->range); + bool valid; + + /* keep mem without hmm range at userptr_inval_list */ + if (!mem->range) + continue; + + /* Only check mem with hmm range associated */ + valid = amdgpu_ttm_tt_get_user_pages_done( + mem->bo->tbo.ttm, mem->range); mem->range = NULL; if (!valid) { @@ -2586,7 +2595,12 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i ret = -EAGAIN; continue; } - WARN(mem->invalid, "Valid BO is marked invalid"); + + if (mem->invalid) { + WARN(1, "Valid BO is marked invalid"); + ret = -EAGAIN; + continue; + } list_move_tail(&mem->validate_list.head, &process_info->userptr_valid_list); From 8a93c691248e7ff2b3944107a1ead2671b6854f2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 10 Mar 2023 13:17:11 -0500 Subject: [PATCH 105/861] drm/amdgpu: bump driver version number for CP GFX shadow So UMDs can determine whether the kernel supports this.
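On the UMD side, this check might look like the following sketch, using libdrm's drmGetVersion() (the 3.53 threshold being the minor bumped in the diff below):

#include <xf86drm.h>

/* 'fd' is an open amdgpu render node; returns nonzero when the
 * running kernel advertises CP GFX shadow support.
 */
static int kernel_has_gfx_shadow(int fd)
{
	drmVersionPtr ver = drmGetVersion(fd);
	int ok = 0;

	if (ver) {
		ok = ver->version_major > 3 ||
		     (ver->version_major == 3 && ver->version_minor >= 53);
		drmFreeVersion(ver);
	}
	return ok;
}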
Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21986 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b400d598b75a..b987022e11b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -110,9 +110,10 @@ * 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields: * tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size, * gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi + * 3.53.0 - Support for GFX11 CP GFX shadowing */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 52 +#define KMS_DRIVER_MINOR 53 #define KMS_DRIVER_PATCHLEVEL 0 unsigned int amdgpu_vram_limit = UINT_MAX; From b185c31847856d9fa3008f727a824db163df0801 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 21 Mar 2023 13:28:33 -0400 Subject: [PATCH 106/861] drm/amdgpu: track MQD size for gfx and compute MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It varies by generation and we need to know the size to expose this via debugfs. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 465ad0b7cddb..60bb4bba1994 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -420,6 +420,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, return r; } + ring->mqd_size = mqd_size; /* prepare MQD backup */ adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.me.mqd_backup[i]) @@ -440,6 +441,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, return r; } + ring->mqd_size = mqd_size; /* prepare MQD backup */ adev->gfx.mec.mqd_backup[i + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.mec.mqd_backup[i]) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 2aa6cc1c4212..b0dc0a0c2631 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -258,6 +258,7 @@ struct amdgpu_ring { struct amdgpu_bo *mqd_obj; uint64_t mqd_gpu_addr; void *mqd_ptr; + unsigned mqd_size; uint64_t eop_gpu_addr; u32 doorbell_index; bool use_doorbell; From 445d85e3c1dfd8c45b24be6f1527f1e117256d0e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 21 Mar 2023 13:59:13 -0400 Subject: [PATCH 107/861] drm/amdgpu: add debugfs interface for reading MQDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide a debugfs interface to access the MQD. Useful for debugging issues with the CP and MES hardware scheduler. 
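As a usage sketch, the new file can be dumped with any 4-byte-aligned reads, per the implementation below; the dri minor "0" and the ring name "gfx_0.0.0" are illustrative and vary by system:

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint32_t dw[8];
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_mqd_gfx_0.0.0",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* dump the first few MQD dwords */
	if (read(fd, dw, sizeof(dw)) == (ssize_t)sizeof(dw))
		for (int i = 0; i < 8; i++)
			printf("mqd[%d] = 0x%08x\n", i, dw[i]);
	close(fd);
	return 0;
}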
v2: fix missing unreserve/unmap when pos >= size (Alex) Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 59 ++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index eaf0f82757ef..1e9ae3bc59d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -509,6 +509,59 @@ static const struct file_operations amdgpu_debugfs_ring_fops = { .llseek = default_llseek }; +static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_ring *ring = file_inode(f)->i_private; + volatile u32 *mqd; + int r; + uint32_t value, result; + + if (*pos & 3 || size & 3) + return -EINVAL; + + result = 0; + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); + if (r) { + amdgpu_bo_unreserve(ring->mqd_obj); + return r; + } + + while (size) { + if (*pos >= ring->mqd_size) + goto done; + + value = mqd[*pos/4]; + r = put_user(value, (uint32_t *)buf); + if (r) + goto done; + buf += 4; + result += 4; + size -= 4; + *pos += 4; + } + +done: + amdgpu_bo_kunmap(ring->mqd_obj); + mqd = NULL; + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) + return r; + + return result; +} + +static const struct file_operations amdgpu_debugfs_mqd_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_mqd_read, + .llseek = default_llseek +}; + #endif void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, @@ -524,6 +577,12 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, &amdgpu_debugfs_ring_fops, ring->ring_size + 12); + if (ring->mqd_obj) { + sprintf(name, "amdgpu_mqd_%s", ring->name); + debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring, + &amdgpu_debugfs_mqd_fops, + ring->mqd_size); + } #endif } From 93aac179a44be000aa8a025963011c752c23d92e Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 8 Apr 2023 02:51:46 -0400 Subject: [PATCH 108/861] drm/amd/display: [FW Promotion] Release 0.0.163.0 Add feature caps for SubVP + PSR so the driver stays backward compatible Tested-by: Daniel Wheeler Acked-by: Rodrigo Siqueira Signed-off-by: Anthony Koo Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index b32a5c977d17..dcec8955bde4 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -257,7 +257,8 @@ struct dmub_feature_caps { */ uint8_t psr; uint8_t fw_assisted_mclk_switch; - uint8_t reserved[6]; + uint8_t subvp_psr_support; + uint8_t reserved[5]; }; struct dmub_visual_confirm_color { From 267e2d8e8e60ed59e74cf888f6b74bd5202e7384 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 9 Apr 2023 23:08:25 -0400 Subject: [PATCH 109/861] drm/amd/display: 3.2.232 Acked-by: Rodrigo Siqueira Signed-off-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 97747f5fde56..d6f0ecb708cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER
"3.2.231" +#define DC_VER "3.2.232" #define MAX_SURFACES 3 #define MAX_PLANES 6 From 0ba4a784a14592abed41873e339eab78ceb6e230 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Wed, 5 Apr 2023 13:47:41 -0600 Subject: [PATCH 110/861] drm/amd/display: implement force function in amdgpu_dm_connector_funcs [Why] When userspace (IGT) inserts EDID with audio data (ELD), EDID is not updated. [How] Implements force function (amdgpu_dm_connector_funcs_force) in amdgpu_dm_connector_funcs to create emulated sink and to handle EDID. Tested-by: Daniel Wheeler Reviewed-by: Hersen Wu Acked-by: Rodrigo Siqueira Signed-off-by: Alex Hung Signed-off-by: Wenchieh Chien Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 28 ++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index deb740466b19..b265434c77e1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6346,6 +6346,31 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) return 0; } +void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *dc_link = aconnector->dc_link; + struct dc_sink *dc_em_sink = aconnector->dc_em_sink; + struct edid *edid; + + if (!connector->edid_override) + return; + + drm_edid_override_connector_update(&aconnector->base); + edid = aconnector->base.edid_blob_ptr->data; + aconnector->edid = edid; + + /* Update emulated (virtual) sink's EDID */ + if (dc_em_sink && dc_link) { + memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); + memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH); + dm_helpers_parse_edid_caps( + dc_link, + &dc_em_sink->dc_edid, + &dc_em_sink->edid_caps); + } +} + static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .reset = amdgpu_dm_connector_funcs_reset, .detect = amdgpu_dm_connector_detect, @@ -6356,7 +6381,8 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .atomic_set_property = amdgpu_dm_connector_atomic_set_property, .atomic_get_property = amdgpu_dm_connector_atomic_get_property, .late_register = amdgpu_dm_connector_late_register, - .early_unregister = amdgpu_dm_connector_unregister + .early_unregister = amdgpu_dm_connector_unregister, + .force = amdgpu_dm_connector_funcs_force }; static int get_modes(struct drm_connector *connector) From 278d3de6754e778cb676b7e1b10782eff1971010 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Mon, 10 Apr 2023 13:52:21 -0400 Subject: [PATCH 111/861] drm/amd/display: Update FW feature caps struct Reorder FW feature caps struct variable order to ensure backwards compatability is maintained for older FW Tested-by: Daniel Wheeler Reviewed-by: Sung Lee Acked-by: Rodrigo Siqueira Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index dcec8955bde4..09d4d0d9b92c 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -257,8 +257,9 @@ struct dmub_feature_caps { */ uint8_t psr; uint8_t fw_assisted_mclk_switch; + uint8_t reserved[4]; uint8_t subvp_psr_support; - uint8_t 
reserved[5]; + uint8_t gecc_enable; }; struct dmub_visual_confirm_color { From 8f3589bb6fcea397775398cba4fbcc46829a60ed Mon Sep 17 00:00:00 2001 From: "JinZe.Xu" Date: Mon, 10 Apr 2023 23:23:37 +0800 Subject: [PATCH 112/861] drm/amd/display: Restore rptr/wptr for DMCUB as workaround [Why] States may be desync after resume. [How] Sync sw state with hw state. Tested-by: Daniel Wheeler Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: JinZe.Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 14 ++++++++++++++ .../gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 5 +++++ .../gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 2 ++ .../gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 5 +++++ .../gpu/drm/amd/display/dmub/src/dmub_dcn31.h | 2 ++ .../gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 5 +++++ .../gpu/drm/amd/display/dmub/src/dmub_dcn32.h | 2 ++ drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 17 +++++++++++++++++ 8 files changed, 52 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 719bf9bb168a..d35432c21856 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -340,6 +340,8 @@ struct dmub_srv_hw_funcs { void (*setup_mailbox)(struct dmub_srv *dmub, const struct dmub_region *inbox1); + uint32_t (*get_inbox1_wptr)(struct dmub_srv *dmub); + uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub); void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset); @@ -600,6 +602,18 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, */ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub); +/** + * dmub_srv_sync_inbox1() - sync sw state with hw state + * @dmub: the dmub service + * + * Sync sw state with hw state when resume from S0i3 + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub); + /** * dmub_srv_cmd_queue() - queues a command to the DMUB * @dmub: the dmub service diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index a6540e27044d..98dad0d47e72 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -282,6 +282,11 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); } +uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_INBOX1_WPTR); +} + uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_RPTR); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index c2e5831ac52c..1df128e57ed3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -202,6 +202,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); +uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub); + uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub); void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index c90b9ee42e12..ebf7aeec4029 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -242,6 +242,11 @@ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); } +uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_INBOX1_WPTR); +} + uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_RPTR); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index f6db6f89d45d..7d5c10ee539b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -204,6 +204,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub, void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); +uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub); + uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub); void dmub_dcn31_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index a7d5607459ed..21dd6cbdb106 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -266,6 +266,11 @@ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); } +uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_INBOX1_WPTR); +} + uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub) { return REG_READ(DMCUB_INBOX1_RPTR); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h index 7d1a6eb4d665..f15336b6e22b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -206,6 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub, void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); +uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub); + uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub); void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 67c53f7e589c..ea3bed70a229 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -166,6 +166,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->backdoor_load = dmub_dcn20_backdoor_load; funcs->setup_windows = dmub_dcn20_setup_windows; funcs->setup_mailbox = dmub_dcn20_setup_mailbox; + funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; funcs->is_supported = dmub_dcn20_is_supported; @@ -235,6 +236,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->backdoor_load = dmub_dcn31_backdoor_load; funcs->setup_windows = dmub_dcn31_setup_windows; funcs->setup_mailbox = dmub_dcn31_setup_mailbox; + funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr; funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox; @@ -273,6 +275,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->backdoor_load_zfb_mode = 
dmub_dcn32_backdoor_load_zfb_mode; funcs->setup_windows = dmub_dcn32_setup_windows; funcs->setup_mailbox = dmub_dcn32_setup_mailbox; + funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr; funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr; funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr; funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox; @@ -642,6 +645,20 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_OK; }
+enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) +{ + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { + dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); + dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; + } + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) { if (!dmub->sw_init)
From ee7be8f3de1ccc9665281fe996f9b6d45191ec1a Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Mon, 10 Apr 2023 14:37:27 -0400 Subject: [PATCH 113/861] drm/amd/display: Limit DCN32 8 channel or less parts to DPM1 for FPO - Due to hardware-related QoS issues, we need to limit certain SKUs with fewer memory channels to DPM1 and above. - With a workload running at DPM0, the urgent return latency can exceed 15us (the expected maximum is 4us), which results in underflow Cc: stable@vger.kernel.org Tested-by: Daniel Wheeler Reviewed-by: Saaem Rizvi Acked-by: Rodrigo Siqueira Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 2 ++ .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 15 +++++++++++++++ .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 2 ++ 3 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 4f8286ae699b..0085ea78ea31 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1888,6 +1888,8 @@ bool dcn32_validate_bandwidth(struct dc *dc, dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + dcn32_override_min_req_memclk(dc, context); + BW_VAL_TRACE_END_WATERMARKS(); goto validate_out; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index b8a2518faecc..ed7ea4c42412 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2887,3 +2887,18 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb) dc_assert_fp_enabled(); dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0; }
+
+void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
+{
+ // WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
+ if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
+ dc->dml.soc.num_chans <= 8) {
+ int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
+
+ if (context->bw_ctx.dml.vba.DRAMSpeed <= dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16 &&
+ num_mclk_levels > 1) {
+ context->bw_ctx.dml.vba.DRAMSpeed = dc->clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index dcf512cd3072..a4206b71d650 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -80,6 +80,8 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req); +void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context); + void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb); #endif
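A standalone sketch of the clamp dcn32_override_min_req_memclk applies above (simplified names and units, not the driver's data structures; the request and the table use the same arbitrary speed unit):

#include <stdio.h>

struct memclk_table {
	int num_levels;
	int speed[4];	/* ascending DPM levels */
};

/* Parts with 8 or fewer memory channels must not serve an FPO/SubVP
 * workload at DPM0, so a DPM0-level request is bumped up to DPM1. */
static int apply_dpm1_floor(int requested_speed, int num_chans,
			    const struct memclk_table *t)
{
	if (num_chans <= 8 && t->num_levels > 1 &&
	    requested_speed <= t->speed[0])
		return t->speed[1];
	return requested_speed;
}

int main(void)
{
	struct memclk_table t = { .num_levels = 4,
				  .speed = { 400, 800, 1200, 1600 } };

	printf("%d\n", apply_dpm1_floor(400, 8, &t));	/* bumped to 800 */
	printf("%d\n", apply_dpm1_floor(400, 12, &t));	/* stays at 400 */
	return 0;
}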
From 9ba90d760e9354c124fa9bbea08017d96699a82c Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Fri, 9 Dec 2022 15:13:18 -0500 Subject: [PATCH 114/861] drm/amd/display: add pixel rate based CRB allocation support This feature is meant to unblock P-State for certain high-end display configs on dcn315. This is achieved by allocating CRB to the detile buffer based on display requirements to meet P-State latency hiding needs. Tested-by: Daniel Wheeler Reviewed-by: Charlene Liu Acked-by: Rodrigo Siqueira Signed-off-by: Dmytro Laktyushkin Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn31/dcn31_hubbub.c | 1 + .../amd/display/dc/dcn315/dcn315_resource.c | 97 ++++++++++++++++++- .../drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 25 ++++- .../drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 3 + .../dc/dml/dcn31/display_mode_vba_31.c | 39 +++++--- .../drm/amd/display/dc/dml/display_mode_vba.c | 6 ++ 6 files changed, 154 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 7e7cd5b64e6a..7445ed27852a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne default: break; } + DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments); /* Should never be hit, if it is we have an erroneous hw config*/ ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 41c972c8eb19..42a0157fd813 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -136,6 +136,9 @@ #define DCN3_15_MAX_DET_SIZE 384 #define DCN3_15_CRB_SEGMENT_SIZE_KB 64 +#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB) +/* Minimum 2 extra segments need to be in compbuf and claimable to guarantee seamless mpo transitions */ +#define MIN_RESERVED_DET_SEGS 2 enum dcn31_clk_src_array_id { DCN31_CLK_SRC_PLL0, @@ -1636,21 +1639,57 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; }
+static int source_format_to_bpp (enum source_format_class SourcePixelFormat)
+{
+ if (SourcePixelFormat == dm_444_64)
+ return 8;
+ else if (SourcePixelFormat == dm_444_16)
+ return 2;
+ else if (SourcePixelFormat == dm_444_8)
+ return 1;
+ else if (SourcePixelFormat == dm_rgbe_alpha)
+ return 5;
+ else if (SourcePixelFormat == dm_420_8)
+ return 3;
+ else if (SourcePixelFormat == dm_420_12)
+ return 6;
+ else
+ return 4;
+}
+
+static bool 
allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) +{ + int i; + struct resource_context *res_ctx = &context->res_ctx; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + /*Don't apply if MPO to avoid transition issues*/ + if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state) + return false; + } + return true; +} + static int dcn315_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) { - int i, pipe_cnt; + int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; + int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB; + bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); DC_FP_START(); dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); DC_FP_END(); - for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; if (!res_ctx->pipe_ctx[i].stream) @@ -1671,6 +1710,23 @@ static int dcn315_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dsc_input_bpc = 0; DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); + if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) { + int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); + /* Ceil to crb segment size */ + int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( + &context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB); + if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) { + bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS; + split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc); + split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + if (split_required) + approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2; + pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate; + remaining_det_segs -= approx_det_segs_required_for_pstate; + } else + remaining_det_segs = -1; + crb_pipes++; + } DC_FP_END(); if (pipes[pipe_cnt].dout.dsc_enable) { @@ -1689,16 +1745,49 @@ static int dcn315_populate_dml_pipes_from_context( break; } } - pipe_cnt++; } + /* Spread remaining unreserved crb evenly among all pipes, use default policy if not enough det or single pipe */ + if (pixel_rate_crb) { + for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &res_ctx->pipe_ctx[i]; + if (!pipe->stream) + continue; + + if (!pipe->top_pipe && !pipe->prev_odm_pipe) { + bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) + || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + if (remaining_det_segs < 0 || crb_pipes == 1) + pipes[pipe_cnt].pipe.src.det_size_override = 0; + if (remaining_det_segs > MIN_RESERVED_DET_SEGS) + pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 
1 : 0); + if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { + /* Clamp to 2 pipe split max det segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); + pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; + } + if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { + /* If we are splitting we must have an even number of segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; + pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; + } + /* Convert segments into size for DML use */ + pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; + crb_idx++; + } + pipe_cnt++; + } + } + if (pipe_cnt) context->bw_ctx.dml.ip.det_buffer_size_kbytes = (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB; if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE) context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE; - ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE); + dc->config.enable_4to1MPC = false; if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { if (is_dual_plane(pipe->plane_state->format) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 59836570603a..19d034341e64 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp( int pipe_cnt, int vlevel) { - int i, pipe_idx, active_hubp_count = 0; + int i, pipe_idx, total_det = 0, active_hubp_count = 0; double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; dc_assert_fp_enabled(); @@ -563,6 +563,18 @@ void dcn31_calculate_wm_and_dlg_fp( if (context->res_ctx.pipe_ctx[i].stream) context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0; } + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = + get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384) + context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2; + total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb; + pipe_idx++; + } + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det; } void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -815,3 +827,14 @@ int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc) { return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0); } + +int dcn_get_approx_det_segs_required_for_pstate( + struct _vcs_dpi_soc_bounding_box_st *soc, + int pix_clk_100hz, int bpp, int seg_size_kb) +{ + /* Roughly calculate required crb to hide latency. 
In practice there is slightly + * more buffer available for latency hiding + */ + return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp + / 10240000 + seg_size_kb - 1) / seg_size_kb; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index 687d3522cc33..8f9c8faed260 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -47,6 +47,9 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc); +int dcn_get_approx_det_segs_required_for_pstate( + struct _vcs_dpi_soc_bounding_box_st *soc, + int pix_clk_100hz, int bpp, int seg_size_kb); int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 330b089d6a86..01603abd75bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -532,7 +532,8 @@ static void CalculateStutterEfficiency( static void CalculateSwathAndDETConfiguration( bool ForceSingleDPP, int NumberOfActivePlanes, - unsigned int DETBufferSizeInKByte, + bool DETSharedByAllDPP, + unsigned int DETBufferSizeInKByte[], double MaximumSwathWidthLuma[], double MaximumSwathWidthChroma[], enum scan_direction_class SourceScan[], @@ -3118,7 +3119,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SurfaceWidthC[k], v->SurfaceHeightY[k], v->SurfaceHeightC[k], - v->DETBufferSizeInKByte[0] * 1024, + v->DETBufferSizeInKByte[k] * 1024, v->BlockHeight256BytesY[k], v->BlockHeight256BytesC[k], v->SurfaceTiling[k], @@ -3313,7 +3314,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib) CalculateSwathAndDETConfiguration( false, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, dummy1, dummy2, v->SourceScan, @@ -3779,14 +3781,16 @@ static noinline void CalculatePrefetchSchedulePerPlane( &v->VReadyOffsetPix[k]); } -static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte) +static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[]) { int i, total_pipes = 0; for (i = 0; i < NumberOfActivePlanes; i++) total_pipes += NoOfDPPThisState[i]; - *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64; - if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE) - *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE; + DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64; + if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE) + DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE; + for (i = 1; i < NumberOfActivePlanes; i++) + DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0]; } @@ -4026,7 +4030,8 @@ void 
dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l CalculateSwathAndDETConfiguration( true, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, v->MaximumSwathWidthLuma, v->MaximumSwathWidthChroma, v->SourceScan, @@ -4166,6 +4171,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l || (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) { v->DISPCLK_DPPCLK_Support[i][j] = false; } + if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) { + v->MPCCombine[i][j][k] = true; + v->NoOfDPP[i][j][k] = 2; + } } v->TotalNumberOfActiveDPP[i][j] = 0; v->TotalNumberOfSingleDPPPlanes[i][j] = 0; @@ -4642,12 +4651,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k]; } - if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315) - PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]); + if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0]) + PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte); CalculateSwathAndDETConfiguration( false, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte[0], + mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0], + v->DETBufferSizeInKByte, v->MaximumSwathWidthLuma, v->MaximumSwathWidthChroma, v->SourceScan, @@ -6611,7 +6621,8 @@ static void CalculateStutterEfficiency( static void CalculateSwathAndDETConfiguration( bool ForceSingleDPP, int NumberOfActivePlanes, - unsigned int DETBufferSizeInKByte, + bool DETSharedByAllDPP, + unsigned int DETBufferSizeInKByteA[], double MaximumSwathWidthLuma[], double MaximumSwathWidthChroma[], enum scan_direction_class SourceScan[], @@ -6695,6 +6706,10 @@ static void CalculateSwathAndDETConfiguration( *ViewportSizeSupport = true; for (k = 0; k < NumberOfActivePlanes; ++k) { + unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k]; + + if (DETSharedByAllDPP && DPPPerPlane[k]) + DETBufferSizeInKByte /= DPPPerPlane[k]; if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16 || SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) { if (SurfaceTiling[k] == dm_sw_linear diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index f9653f511baa..2f63ae954826 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -571,6 +571,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate; mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy; mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override; + if (src->det_size_override) + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override; + else + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes; //TODO: Need to assign 
correct values to dp_multistream vars mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en; mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id; @@ -785,6 +789,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.pipe_plane[k] = mode_lib->vba.NumberOfActivePlanes; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++; + if (src_k->det_size_override) + mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override; if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] == dm_horz) { mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] += From b058e3999021e04cd16d1e487732e20ad1f4b4f6 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Mon, 10 Apr 2023 17:17:07 -0400 Subject: [PATCH 115/861] drm/amd/display: Enable SubVP on PSR panels if single stream Enable SubVP on PSR panels now that we have FW support Tested-by: Daniel Wheeler Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 1 + drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index d6f0ecb708cd..dc0a61ca99d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -209,6 +209,7 @@ struct dc_color_caps { struct dc_dmub_caps { bool psr; bool mclk_sw; + bool subvp_psr; }; struct dc_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 9ce11ed769a0..1e9ada8b131c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -944,6 +944,7 @@ void dcn32_init_hw(struct dc *dc) if (dc->ctx->dmub_srv) { dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; + dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index ed7ea4c42412..e1e82ad4c583 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -692,7 +692,8 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * to combine this with SubVP can cause issues with the scheduling). 
* - Not TMZ surface */ - if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) && + if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && + (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface && (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && From b5389eca7b39026452dfc1d2cd5b05448c631298 Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Tue, 11 Apr 2023 10:49:38 -0400 Subject: [PATCH 116/861] drm/amd/display: Lowering min Z8 residency time [Why & How] Per HW team request, we're lowering the minimum Z8 residency time to 2000us. This enables Z8 support for additional modes we were previously blocking, such as 2K at refresh rates above 60Hz Cc: stable@vger.kernel.org Tested-by: Daniel Wheeler Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Leo Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 24806acc8438..abeeede38fb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -885,7 +885,7 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = false, .enable_z9_disable_interface = true, - .minimum_z8_residency_time = 3080, + .minimum_z8_residency_time = 2000, .psr_skip_crtc_disable = true, .disable_dmcu = true, .force_abm_enable = false, From 5e9252d8415f50095c854c85cf9ebcc894e9ac0d Mon Sep 17 00:00:00 2001 From: Josip Pavic Date: Mon, 3 Apr 2023 10:42:06 -0400 Subject: [PATCH 117/861] drm/amd/display: add option to use custom backlight caps [Why & How] Provide an option for vendors to specify a custom brightness-to-backlight conversion profile. 
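The patch itself only adds the data points and the ATIF caps plumbing; the consumer that maps an OS brightness percentage onto such a curve is not part of this change. A minimal sketch of the usual piecewise-linear lookup, using an abbreviated made-up curve:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct bl_data_point {
	uint8_t luminance;	/* brightness percentage requested by the OS */
	uint8_t signal;		/* backlight signal to program */
};

static const struct bl_data_point curve[] = {
	{2, 14}, {25, 45}, {50, 98}, {75, 168}, {98, 247},
};

/* Map a brightness percentage onto the curve by linear interpolation
 * between the two nearest data points, clamping at the endpoints. */
static uint8_t luminance_to_signal(uint8_t lum)
{
	size_t n = sizeof(curve) / sizeof(curve[0]);
	size_t i;

	if (lum <= curve[0].luminance)
		return curve[0].signal;
	for (i = 1; i < n; i++) {
		if (lum <= curve[i].luminance) {
			unsigned int span = curve[i].luminance - curve[i - 1].luminance;
			unsigned int frac = lum - curve[i - 1].luminance;

			return curve[i - 1].signal +
			       (curve[i].signal - curve[i - 1].signal) * frac / span;
		}
	}
	return curve[n - 1].signal;
}

int main(void)
{
	printf("40%% brightness -> signal %u\n", luminance_to_signal(40));
	return 0;
}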
Tested-by: Daniel Wheeler Reviewed-by: Anthony Koo Acked-by: Rodrigo Siqueira Signed-off-by: Josip Pavic Signed-off-by: Alex Deucher --- .../amd/display/modules/power/power_helpers.c | 43 +++++++++++++++++++ .../amd/display/modules/power/power_helpers.h | 3 ++ 2 files changed, 46 insertions(+) diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 51e76bce92ea..68d95b92df76 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -116,6 +116,27 @@ static const struct abm_parameters * const abm_settings[] = { abm_settings_config2, }; +const struct dm_bl_data_point custom_backlight_curve0[] = { + {2, 14}, {4, 16}, {6, 18}, {8, 21}, {10, 23}, {12, 26}, {14, 29}, {16, 32}, {18, 35}, + {20, 38}, {22, 41}, {24, 44}, {26, 48}, {28, 52}, {30, 55}, {32, 59}, {34, 62}, + {36, 67}, {38, 71}, {40, 75}, {42, 80}, {44, 84}, {46, 88}, {48, 93}, {50, 98}, + {52, 103}, {54, 108}, {56, 113}, {58, 118}, {60, 123}, {62, 129}, {64, 135}, {66, 140}, + {68, 146}, {70, 152}, {72, 158}, {74, 164}, {76, 171}, {78, 177}, {80, 183}, {82, 190}, + {84, 197}, {86, 204}, {88, 211}, {90, 218}, {92, 225}, {94, 232}, {96, 240}, {98, 247}}; + +struct custom_backlight_profile { + uint8_t ac_level_percentage; + uint8_t dc_level_percentage; + uint8_t min_input_signal; + uint8_t max_input_signal; + uint8_t num_data_points; + const struct dm_bl_data_point *data_points; +}; + +static const struct custom_backlight_profile custom_backlight_profiles[] = { + {100, 32, 12, 255, ARRAY_SIZE(custom_backlight_curve0), custom_backlight_curve0}, +}; + #define NUM_AMBI_LEVEL 5 #define NUM_AGGR_LEVEL 4 #define NUM_POWER_FN_SEGS 8 @@ -944,3 +965,25 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, return true; } + +bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps) +{ + unsigned int data_points_size; + + if (config_no >= ARRAY_SIZE(custom_backlight_profiles)) + return false; + + data_points_size = custom_backlight_profiles[config_no].num_data_points + * sizeof(custom_backlight_profiles[config_no].data_points[0]); + + caps->size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size; + caps->flags = 0; + caps->error_code = 0; + caps->ac_level_percentage = custom_backlight_profiles[config_no].ac_level_percentage; + caps->dc_level_percentage = custom_backlight_profiles[config_no].dc_level_percentage; + caps->min_input_signal = custom_backlight_profiles[config_no].min_input_signal; + caps->max_input_signal = custom_backlight_profiles[config_no].max_input_signal; + caps->num_data_points = custom_backlight_profiles[config_no].num_data_points; + memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size); + return true; +} diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 1d3079e56799..ffc924c9991b 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -62,4 +62,7 @@ bool mod_power_only_edp(const struct dc_state *context, bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, struct dc_stream_state *stream, struct psr_config *config); + +bool fill_custom_backlight_caps(unsigned int config_no, + struct dm_acpi_atif_backlight_caps *caps); #endif /* 
MODULES_POWER_POWER_HELPERS_H_ */ From 7a1187eab0111ac52ec216f2c18cb7822fec4a4c Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Tue, 21 Mar 2023 11:31:22 -0400 Subject: [PATCH 118/861] drm/amd/display: Program OTG vtotal min/max selectors unconditionally OTG_V_TOTAL_MIN/MAX_SEL bits are required to be programmed to 1 if writes to OTG timing registers need to be honoured. This is usually needed only when freesync is active. However, SubVP + DRR requires that we're able to change timing even without freesync being active (but supported). By unconditionally writing these bits to 1, we remove an unnecessary dependency so that DMCUB can change OTG timing whenever it wants. Signed-off-by: Aurabindo Pillai Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c index 2ee798965bc2..6ef56fb32131 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c @@ -245,16 +245,9 @@ static void optc32_set_drr( } optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); - optc32_setup_manual_trigger(optc); - } else { - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_SET_V_TOTAL_MIN_MASK, 0, - OTG_V_TOTAL_MIN_SEL, 0, - OTG_V_TOTAL_MAX_SEL, 0, - OTG_FORCE_LOCK_ON_EVENT, 0); - - optc->funcs->set_vtotal_min_max(optc, 0, 0); } + + optc32_setup_manual_trigger(optc); } static struct timing_generator_funcs dcn32_tg_funcs = { From 1068e987ad0be83a109147fe7fa0891700e8d80e Mon Sep 17 00:00:00 2001 From: "Leo (Hanghong) Ma" Date: Tue, 28 Mar 2023 16:26:29 -0400 Subject: [PATCH 119/861] drm/amd/display: Update scaler recout data for visual confirm [Why] Our QA found the visual confirm color is not as expected in the Auto Color Management feature test after enabling it. [How] Calculate the scaler recout data when visual confirm is enabled to update the visual confirm bar on the display. 
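A toy sketch of the bar sizing that the hunks below move into calculate_recout(): each DPP instance shrinks its recout by a slightly different amount, so bars drawn by stacked pipes stay visually distinct (standalone code, not DC's types):

#include <stdio.h>

#define VC_BAR_HEIGHT_DEFAULT 3

/* Shrink the recout so a visual-confirm band shows at the bottom of the
 * plane; offsetting by the DPP instance keeps bands from different pipes
 * from landing on top of each other. */
static int recout_height_with_vc(int height, int dpp_inst, int bar_height)
{
	return height - 2 * (dpp_inst + bar_height);
}

int main(void)
{
	printf("pipe0: %d\n", recout_height_with_vc(1080, 0, VC_BAR_HEIGHT_DEFAULT));
	printf("pipe1: %d\n", recout_height_with_vc(1080, 1, VC_BAR_HEIGHT_DEFAULT));
	return 0;
}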
Tested-by: Daniel Wheeler Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Leo (Hanghong) Ma Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/core/dc_resource.c | 18 ++++++++++++++++++ .../drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 18 +----------------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2f704e26219c..193e09b05f5a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -69,6 +69,10 @@ #include "../dcn32/dcn32_resource.h" #include "../dcn321/dcn321_resource.h" +#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 +#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 +#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 + #define DC_LOGGER_INIT(logger) enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) @@ -807,6 +811,8 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx) struct rect surf_clip = plane_state->clip_rect; bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; int split_count, split_idx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE) @@ -875,6 +881,18 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx) data->recout.width = data->h_active - data->recout.x; } } + + /* Check bounds to ensure the VC bar height was set to a sane value */ + if (dpp != NULL) { + if ((dpp->ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && + (dpp->ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { + visual_confirm_rect_height = dpp->ctx->dc->debug.visual_confirm_rect_height; + } + + if (dpp->ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) + data->recout.height = data->recout.height - + 2 * (dpp->inst + visual_confirm_rect_height); + } } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index b33955928bd0..7e140c35a0ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,9 +39,6 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 -#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 -#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 -#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 #define REG(reg)\ dpp->tf_regs->reg @@ -591,18 +588,6 @@ static void dpp1_dscl_set_manual_ratio_init( static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { - int visual_confirm_on = 0; - unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; - - if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) - visual_confirm_on = 1; - - /* Check bounds to ensure the VC bar height was set to a sane value */ - if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && - (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { - visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; - } - REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -613,8 +598,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp 
*dpp, /* Number of RECOUT horizontal pixels */ RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ - RECOUT_HEIGHT, recout->height - - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height)); + RECOUT_HEIGHT, recout->height); } /** From f477c7b5ec3e4ef87606671b340abf3bdb0cccff Mon Sep 17 00:00:00 2001 From: Alan Liu Date: Mon, 10 Apr 2023 11:35:44 +0800 Subject: [PATCH 120/861] drm/amd/display: Fix in secure display context creation [Why & How] We need to store CRTC information in secure_display_ctx, so postpone the call to amdgpu_dm_crtc_secure_display_create_contexts() until we initialize all CRTCs. Cc: stable@vger.kernel.org Tested-by: Daniel Wheeler Reviewed-by: Wayne Lin Acked-by: Rodrigo Siqueira Signed-off-by: Alan Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 +++++------ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b265434c77e1..7f0dcd7af4c7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1776,12 +1776,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_init_callbacks(adev->dm.dc, &init_params); } -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); - if (!adev->dm.secure_display_ctxs) { - DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n"); - } -#endif if (dc_is_dmub_outbox_supported(adev->dm.dc)) { init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); @@ -1840,6 +1834,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); + if (!adev->dm.secure_display_ctxs) + DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); +#endif DRM_DEBUG_DRIVER("KMS initialized.\n"); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index 935adca6f048..748e80ef40d0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -100,7 +100,7 @@ struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts( #else #define amdgpu_dm_crc_window_is_activated(x) #define amdgpu_dm_crtc_handle_crc_window_irq(x) -#define amdgpu_dm_crtc_secure_display_create_contexts() +#define amdgpu_dm_crtc_secure_display_create_contexts(x) #endif #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ From 469a62938a45ef382c9cb7b9fec6c6c1fcd781c0 Mon Sep 17 00:00:00 2001 From: Gabe Teeger Date: Thu, 6 Apr 2023 17:03:06 -0400 Subject: [PATCH 121/861] drm/amd/display: update extended blank for dcn314 onwards [Why] Flickering and underflow were observed when testing extended blank on dcn314. [What] Vstartup is constrained by vblank_nom, so adjusting it to include the non-adjusted vtotal in its calculation during freesync video means that Vstartup is not changed when vtotal changes. This fixed the flickering + underflow. The dc_extended_blank_supported function was removed because extended blank is only relevant when zstate is supported. 
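A rough sketch of the resulting vblank_nom selection, mirroring the dcn314 DML-input change later in this patch (the names are simplified stand-ins for the DML pipe fields, not the driver's):

#include <stdio.h>

/* During freesync video the nominal (unstretched) vtotal keeps feeding
 * the vblank_nom calculation, so Vstartup does not move when vtotal is
 * stretched; otherwise a default blank duration in microseconds is
 * converted to lines. */
static int pick_vblank_nom(int v_total_nominal, int v_active,
			   int v_total_min, int v_total_max,
			   int line_time_us, int vblank_nom_default_us)
{
	int is_freesync_video = (v_total_min == v_total_max) &&
				(v_total_min > v_total_nominal);

	if (is_freesync_video)
		return v_total_nominal - v_active;
	return vblank_nom_default_us / line_time_us;
}

int main(void)
{
	/* 1080p-style numbers with a stretched vtotal of 1800 lines */
	printf("freesync video: vblank_nom = %d lines\n",
	       pick_vblank_nom(1125, 1080, 1800, 1800, 7, 668));
	printf("fixed refresh:  vblank_nom = %d lines\n",
	       pick_vblank_nom(1125, 1080, 1125, 2200, 7, 668));
	return 0;
}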
The increased vtotal during freesync can be passed to dml regardless of whether extended blank is supported or not, so this function is not needed. Updates were made recently in dml to the calculation of min_dst_y_next_start. Dml input for dcn314 will now always use the newer calculation for min_dst_y_next_start. Dml input for older dcn versions remains untouched. The variable optimized_min_dst_y_next_start is replaced everywhere with min_dst_y_next_start, and the updated dml allows min_dst_y_next_start to increase to an optimized value during freesync video, then return to default when freesync is disengaged. Also removed registry key for controlling extended blank feature. Tested-by: Daniel Wheeler Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Gabe Teeger Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 21 ----------------- drivers/gpu/drm/amd/display/dc/dc.h | 2 -- .../drm/amd/display/dc/dcn20/dcn20_hwseq.c | 4 ++-- .../drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 23 +++++++++---------- .../dc/dml/dcn31/display_rq_dlg_calc_31.c | 3 +-- .../amd/display/dc/dml/dcn314/dcn314_fpu.c | 14 +++++++---- .../dc/dml/dcn314/display_rq_dlg_calc_314.c | 16 ++++--------- .../amd/display/dc/dml/display_mode_structs.h | 3 +-- 8 files changed, 29 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c10243ef93b6..89a245e3c9ac 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2607,9 +2607,6 @@ static enum surface_update_type check_update_surfaces_for_stream( stream_update->vrr_active_variable)) su_flags->bits.fams_changed = 1; - if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) - su_flags->bits.crtc_timing_adjust = 1; - if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -4849,21 +4846,3 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); } - -/** - * dc_extended_blank_supported - Decide whether extended blank is supported - * - * @dc: [in] Current DC state - * - * Extended blank is a freesync optimization feature to be enabled in the - * future. During the extra vblank period gained from freesync, we have the - * ability to enter z9/z10. 
- * - * Return: - * Indicate whether extended blank is supported (%true or %false) - */ -bool dc_extended_blank_supported(struct dc *dc) -{ - return dc->debug.extended_blank_optimization && !dc->debug.disable_z10 - && dc->caps.zstate_support && dc->caps.is_apu; -} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index dc0a61ca99d0..5268d98b96dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -2128,8 +2128,6 @@ struct dc_sink_init_data { bool converter_disable_audio; }; -bool dc_extended_blank_supported(struct dc *dc); - struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params); /* Newer interfaces */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 5800acf6aae1..f49c1c0d6274 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2130,7 +2130,7 @@ void dcn20_optimize_bandwidth( dc->clk_mgr, context, true); - if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) { + if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) { for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -2138,7 +2138,7 @@ void dcn20_optimize_bandwidth( && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total) pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp, - pipe_ctx->dlg_regs.optimized_min_dst_y_next_start); + pipe_ctx->dlg_regs.min_dst_y_next_start); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index f1c1a4b5fcac..7661f8946aa3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -948,10 +948,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc { int plane_count; int i; - unsigned int optimized_min_dst_y_next_start_us; + unsigned int min_dst_y_next_start_us; plane_count = 0; - optimized_min_dst_y_next_start_us = 0; + min_dst_y_next_start_us = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].plane_state) plane_count++; @@ -973,19 +973,18 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { struct dc_link *link = context->streams[0]->sink->link; struct dc_stream_status *stream_status = &context->stream_status[0]; + struct dc_stream_state *current_stream = context->streams[0]; int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? 
dc->debug.minimum_z8_residency_time : 1000; bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; bool is_pwrseq0 = link->link_index == 0; + bool isFreesyncVideo; - if (dc_extended_blank_supported(dc)) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].stream == context->streams[0] - && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max - && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) { - optimized_min_dst_y_next_start_us = - context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us; - break; - } + isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max; + isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) { + min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us; + break; } } @@ -993,7 +992,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc if (stream_status->plane_count > 1) return DCN_ZSTATE_SUPPORT_DISALLOW; - if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)) + if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000)) return DCN_ZSTATE_SUPPORT_ALLOW; else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr) return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index 2244e4fb8c96..fcde8f21b8be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -987,8 +987,7 @@ static void dml_rq_dlg_get_dlg_params( dlg_vblank_start = interlaced ? 
(vblank_start / 2) : vblank_start; disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2)); - disp_dlg_regs->optimized_min_dst_y_next_start_us = 0; - disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start; + disp_dlg_regs->min_dst_y_next_start_us = 0; ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18)); dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 44082f65de1f..19370b872a91 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -286,6 +286,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; bool upscaled = false; + bool isFreesyncVideo = false; dc_assert_fp_enabled(); @@ -299,9 +300,16 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; - if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min - && pipe->stream->adjust.v_total_min > timing->v_total) + isFreesyncVideo = pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min; + isFreesyncVideo = isFreesyncVideo && pipe->stream->adjust.v_total_min > timing->v_total; + + if (!isFreesyncVideo) { + pipes[pipe_cnt].pipe.dest.vblank_nom = + dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0)); + } else { pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; + pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; + } if (pipe->plane_state && (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || @@ -323,8 +331,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.dest.vblank_nom = - dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0)); pipes[pipe_cnt].pipe.src.dcc_rate = 3; pipes[pipe_cnt].dout.dsc_input_bpc = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index ea4eb66066c4..4f945458b2b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -1051,7 +1051,6 @@ static void dml_rq_dlg_get_dlg_params( float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA - int blank_lines = 0; memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs)); memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs)); @@ -1075,17 +1074,10 @@ static void dml_rq_dlg_get_dlg_params( min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA dlg_vblank_start = interlaced ? 
(vblank_start / 2) : vblank_start; - disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start; - disp_dlg_regs->optimized_min_dst_y_next_start_us = 0; - disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2)); - blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1); - if (blank_lines < 0) - blank_lines = 0; - if (blank_lines != 0) { - disp_dlg_regs->optimized_min_dst_y_next_start = vba__min_dst_y_next_start; - disp_dlg_regs->optimized_min_dst_y_next_start_us = (disp_dlg_regs->optimized_min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz; - disp_dlg_regs->min_dst_y_next_start = disp_dlg_regs->optimized_min_dst_y_next_start; - } + disp_dlg_regs->min_dst_y_next_start_us = + (vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz; + disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2); + ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18)); dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 3c077164f362..ff0246a9458f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -619,8 +619,7 @@ struct _vcs_dpi_display_dlg_regs_st { unsigned int refcyc_h_blank_end; unsigned int dlg_vblank_end; unsigned int min_dst_y_next_start; - unsigned int optimized_min_dst_y_next_start; - unsigned int optimized_min_dst_y_next_start_us; + unsigned int min_dst_y_next_start_us; unsigned int refcyc_per_htotal; unsigned int refcyc_x_after_scaler; unsigned int dst_y_after_scaler; From 9c25ab167df412a5474dedfd0e7743e76bc89cbe Mon Sep 17 00:00:00 2001 From: Sung Lee Date: Mon, 10 Apr 2023 14:15:14 -0400 Subject: [PATCH 122/861] drm/amd/display: Add p-state debugging [WHY] P-State related issues are fairly common but currently there is no way to debug these issues after the fact. 
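For context on what these HW state queries read: the hubbub change below uses an index/data debug-register pair, where one register selects an internal status word and a second returns its value. A toy model of that access pattern, with an invented register map:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[2];
static const uint32_t debug_words[8] = { 0, 0, 0, 0, 0, 0, 0xdeadbeef, 0 };

static void reg_write(int reg, uint32_t val) { fake_regs[reg] = val; }

static uint32_t reg_read(int reg)
{
	/* reg 1 models the DATA register: it returns whichever debug word
	 * the INDEX register (reg 0) currently selects. */
	return reg == 1 ? debug_words[fake_regs[0] & 7] : fake_regs[reg];
}

int main(void)
{
	reg_write(0, 0x6);	/* INDEX: select status word 6 */
	printf("debug data: 0x%08x\n", (unsigned)reg_read(1));	/* DATA */
	return 0;
}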
[HOW] Add helpful registers to HW state queries Tested-by: Daniel Wheeler Reviewed-by: Aric Cyr Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Sung Lee Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 5 +++++ drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 1 + drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 3 ++- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 3 +++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h | 1 + 5 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 24bd93219936..0ddd310cc971 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -623,6 +623,11 @@ void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_s REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid); REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe); } + + if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) { + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6); + hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + } } static const struct hubbub_funcs hubbub2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index beb26dc8a07f..aa80b3f2ca3f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -111,6 +111,7 @@ struct dcn_hubbub_state { uint32_t vm_error_vmid; uint32_t vm_error_pipe; uint32_t vm_error_mode; + uint32_t test_debug_data; }; struct hubbub_funcs { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index d35432c21856..7c9a2b34bd05 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -271,7 +271,7 @@ struct dmub_srv_hw_params { */ struct dmub_diagnostic_data { uint32_t dmcub_version; - uint32_t scratch[16]; + uint32_t scratch[17]; uint32_t pc; uint32_t undefined_address_fault_addr; uint32_t inst_fetch_fault_addr; @@ -282,6 +282,7 @@ struct dmub_diagnostic_data { uint32_t inbox0_rptr; uint32_t inbox0_wptr; uint32_t inbox0_size; + uint32_t gpint_datain0; uint8_t is_dmcub_enabled : 1; uint8_t is_dmcub_soft_reset : 1; uint8_t is_dmcub_secure_reset : 1; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 21dd6cbdb106..bf5994e292d9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -439,6 +439,7 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); + diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16); diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); @@ -469,6 +470,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + + diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } void 
dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h index f15336b6e22b..d58a1e4b9f1c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -107,6 +107,7 @@ struct dmub_srv; DMUB_SR(DMCUB_SCRATCH15) \ DMUB_SR(DMCUB_SCRATCH16) \ DMUB_SR(DMCUB_SCRATCH17) \ + DMUB_SR(DMCUB_GPINT_DATAIN0) \ DMUB_SR(DMCUB_GPINT_DATAIN1) \ DMUB_SR(DMCUB_GPINT_DATAOUT) \ DMUB_SR(CC_DC_PIPE_DIS) \ From c8cefb99fc811304fff9590677994531ff0ac992 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Wed, 12 Apr 2023 15:42:41 -0400 Subject: [PATCH 123/861] drm/amd/display: For no plane case set pstate support in validation - Previously update_clocks was overriding pstate support if it checked that there were no planes - However, P-State support should be determined in validation phase instead - This fixes an issue where a transition from FPO -> no planes expects UCLK MAX, but update_clocks was overriding to set UCLK to min Tested-by: Daniel Wheeler Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 4 +--- .../gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 7 ++----- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 8 +++++++- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 1 + 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 694a9d3d92ae..3908e7cfd6cb 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -206,7 +206,6 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, bool force_reset = false; bool update_uclk = false; bool p_state_change_support; - int total_plane_count; if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present) return; @@ -247,8 +246,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz; clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); - p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); + p_state_change_support = new_clocks->p_state_change_support; // invalidate the current P-State forced min in certain dc_mode_softmax situations if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index eea103908b09..85e963ec25ab 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -459,7 +459,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, bool update_uclk = false, update_fclk = false; bool p_state_change_support; bool fclk_p_state_change_support; - int total_plane_count; if (dc->work_arounds.skip_clock_update) return; @@ -488,8 +487,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support; - total_plane_count = 
clk_mgr_helper_get_active_plane_cnt(dc, context); - fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0); + fclk_p_state_change_support = new_clocks->fclk_p_state_change_support; if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) { clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support; @@ -528,8 +526,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways); } - - p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); + p_state_change_support = new_clocks->p_state_change_support; if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { clk_mgr_base->clks.p_state_change_support = p_state_change_support; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 7661f8946aa3..a5b1e4bb1a22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1042,7 +1042,7 @@ void dcn20_calculate_dlg_params(struct dc *dc, int pipe_cnt, int vlevel) { - int i, pipe_idx; + int i, pipe_idx, active_hubp_count = 0; dc_assert_fp_enabled(); @@ -1078,6 +1078,8 @@ void dcn20_calculate_dlg_params(struct dc *dc, for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; + if (context->res_ctx.pipe_ctx[i].plane_state) + active_hubp_count++; pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); @@ -1104,6 +1106,10 @@ void dcn20_calculate_dlg_params(struct dc *dc, pipe_idx++; } + /* If DCN isn't making memory requests we can allow pstate change */ + if (!active_hubp_count) { + context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + } /*save a original dppclock copy*/ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index e1e82ad4c583..2624236d20d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1433,6 +1433,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->bw_ctx.bw.dcn.clk.dramclk_khz = 0; context->bw_ctx.bw.dcn.clk.fclk_khz = 0; context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true; } /*save a original dppclock copy*/ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; From 87f0c16e0eeb672fb888b4e173edff0252e02757 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Mon, 17 Apr 2023 10:23:05 -0400 Subject: [PATCH 124/861] drm/amd/display: Enable SubVP for high refresh rate displays [Description] - Add debug option to enable SubVP for high refresh rate displays - For now limit the enabled modes based on a table in debug options - Currently disabled by default Tested-by: Daniel Wheeler 
Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../drm/amd/display/dc/dcn32/dcn32_resource.c | 1 + .../drm/amd/display/dc/dcn32/dcn32_resource.h | 14 +++++ .../display/dc/dcn32/dcn32_resource_helpers.c | 15 +++++ .../amd/display/dc/dcn321/dcn321_resource.c | 1 + .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 62 ++++++++++++++++++- .../dc/dml/dcn32/display_mode_vba_32.c | 12 ++-- 7 files changed, 101 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 5268d98b96dc..e41107098ffe 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -881,6 +881,7 @@ struct dc_debug_options { bool disable_boot_optimizations; bool override_odm_optimization; bool minimize_dispclk_using_odm; + bool disable_subvp_high_refresh; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 0085ea78ea31..7feeba78c0c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -728,6 +728,7 @@ static const struct dc_debug_options debug_defaults_drv = { .fpo_vactive_margin_us = 2000, // 2000us .disable_fpo_vactive = false, .disable_boot_optimizations = false, + .disable_subvp_high_refresh = true, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 3937dbc1e552..04be01ae1ecf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -40,6 +40,7 @@ #define DCN3_2_VMIN_DISPCLK_HZ 717000000 #define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq #define DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US 100 // Only allow FPO + Vactive if active margin >= 100 +#define SUBVP_HIGH_REFRESH_LIST_LEN 3 #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) @@ -47,6 +48,15 @@ extern struct _vcs_dpi_ip_params_st dcn3_2_ip; extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc; +struct subvp_high_refresh_list { + int min_refresh; + int max_refresh; + struct resolution { + int width; + int height; + } res[SUBVP_HIGH_REFRESH_LIST_LEN]; +}; + struct dcn32_resource_pool { struct resource_pool base; }; @@ -151,10 +161,14 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); +bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe); + unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans); double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context); +bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index eeca16faf31a..df912c333bbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -656,3 +656,18 @@ struct dc_stream_state 
*dcn32_can_support_mclk_switch_using_fw_based_vblank_stre return fpo_candidate_stream; } + +bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height) +{ + bool is_native_scaling = false; + + if (pipe->stream->timing.h_addressable == width && + pipe->stream->timing.v_addressable == height && + pipe->plane_state->src_rect.width == width && + pipe->plane_state->src_rect.height == height && + pipe->plane_state->dst_rect.width == width && + pipe->plane_state->dst_rect.height == height) + is_native_scaling = true; + + return is_native_scaling; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 138657c38afe..63bd6928c82f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -727,6 +727,7 @@ static const struct dc_debug_options debug_defaults_drv = { .fpo_vactive_margin_us = 2000, // 2000us .disable_fpo_vactive = false, .disable_boot_optimizations = false, + .disable_subvp_high_refresh = true, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 2624236d20d0..9cc0e6089032 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -35,6 +35,15 @@ #define DC_LOGGER_INIT(logger) +static const struct subvp_high_refresh_list subvp_high_refresh_list = { + .min_refresh = 120, + .max_refresh = 165, + .res = { + {.width = 3840, .height = 2160, }, + {.width = 3440, .height = 1440, }, + {.width = 2560, .height = 1440, }}, +}; + struct _vcs_dpi_ip_params_st dcn3_2_ip = { .gpuvm_enable = 0, .gpuvm_max_page_table_levels = 4, @@ -694,7 +703,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, */ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && - pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface && + pipe->stream->mall_stream_config.type == SUBVP_NONE && + (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && + !pipe->plane_state->address.tmz_surface && (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && dcn32_allow_subvp_with_active_margin(pipe)))) { @@ -2789,6 +2800,55 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) return allow; } +/** + * ************************************************************************************************ + * dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp + * + * @param [in]: dc: Current DC state + * @param [in]: context: New DC state to be programmed + * @param [in]: pipe: Pipe to be considered for use in subvp + * + * On high refresh rate display configs, we will allow subvp under the following conditions: + * 1. Resolution is 3840x2160, 3440x1440, or 2560x1440 + * 2. Refresh rate is between 120hz - 165hz + * 3. No scaling + * 4. Freesync is inactive + * 5. 
For single display cases, freesync must be disabled + * + * @return: True if pipe can be used for subvp, false otherwise + * + * ************************************************************************************************ + */ +bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe) +{ + bool allow = false; + uint32_t refresh_rate = 0; + uint32_t min_refresh = subvp_high_refresh_list.min_refresh; + uint32_t max_refresh = subvp_high_refresh_list.max_refresh; + uint32_t i; + + if (!dc->debug.disable_subvp_high_refresh && pipe->stream && + pipe->plane_state && !pipe->stream->vrr_active_variable) { + refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) + / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); + if (refresh_rate >= min_refresh && refresh_rate <= max_refresh) { + for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) { + uint32_t width = subvp_high_refresh_list.res[i].width; + uint32_t height = subvp_high_refresh_list.res[i].height; + + if (dcn32_check_native_scaling_for_res(pipe, width, height)) { + if ((context->stream_count == 1 && !pipe->stream->allow_freesync) || context->stream_count > 1) { + allow = true; + break; + } + } + } + } + } + return allow; +} + /** * ******************************************************************************************* * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 13c7e7394b1c..66f44a013fe5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -2322,10 +2322,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.LinkCapacitySupport[i] = true; for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { if (mode_lib->vba.BlendingAndTiming[k] == k - && (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0 - || mode_lib->vba.Output[k] == dm_edp - || mode_lib->vba.Output[k] == dm_hdmi) - && mode_lib->vba.OutputBppPerState[i][k] == 0) { + && (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0 + || mode_lib->vba.Output[k] == dm_edp + || mode_lib->vba.Output[k] == dm_hdmi) + && mode_lib->vba.OutputBppPerState[i][k] == 0 && + (mode_lib->vba.UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe)) { + /* Phantom pipes don't consider DSC in DML, so it could fail link check. + * However, we don't care about the link for phantom pipes. + */ mode_lib->vba.LinkCapacitySupport[i] = false; } } From 1938bcdc4b530d6413e03f238c2df56f99f17220 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Wed, 12 Apr 2023 17:06:46 -0400 Subject: [PATCH 125/861] drm/amd/display: Query GECC enable for SubVP disable - We want to disable SubVP if Graphics Error Correction/Correcting Code (GECC) is enabled. - After reading feature caps from DMCUB during init, use the GECC enable/disable info to determine if SubVP can be enabled or not. 
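For illustration, a minimal sketch of the gating this change introduces — the helper name is hypothetical, the field names are the ones used in the diffs below, and the rest of the SubVP validation condition is elided:

	/* Sketch only: SubVP is considered only when firmware did not report
	 * GECC as enabled; gecc_enable is cached from the DMCUB feature caps
	 * during init (see the dcn32_hwseq.c hunk below).
	 */
	static bool subvp_allowed_wrt_gecc(const struct dc *dc)
	{
		return !dc->debug.force_disable_subvp &&
			!dc->caps.dmub_caps.gecc_enable;
	}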
Tested-by: Daniel Wheeler Reviewed-by: Nicholas Kazlauskas Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 1 + drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index e41107098ffe..f52ab717ac7d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -210,6 +210,7 @@ struct dc_dmub_caps { bool psr; bool mclk_sw; bool subvp_psr; + bool gecc_enable; }; struct dc_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 1e9ada8b131c..4950eaa4406b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -945,6 +945,7 @@ void dcn32_init_hw(struct dc *dc) dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support; + dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 9cc0e6089032..a73cacef6e48 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1141,7 +1141,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, * 4. Display configuration passes validation * 5. 
(Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) */ - if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) && + if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) && !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && (*vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || From 5a096b73c8fed3a9987ba15378285df360e2284b Mon Sep 17 00:00:00 2001 From: Michael Strauss Date: Tue, 11 Apr 2023 12:44:54 -0400 Subject: [PATCH 126/861] drm/amd/display: Keep disable aux-i delay as 0 [WHY] Current Aux-I sequence checks for local_sink which isn't populated on MST links [HOW] Leave disable aux-i delay as 0 for MST cases Cc: stable@vger.kernel.org Tested-by: Daniel Wheeler Reviewed-by: George Shen Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Michael Strauss Signed-off-by: Alex Deucher --- .../link_dp_training_fixed_vs_pe_retimer.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index fb6c938c6dab..15faaf645b14 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -233,8 +233,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( link->dpcd_caps.lttpr_caps.phy_repeater_cnt); const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = - link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; + uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; uint32_t vendor_lttpr_write_address = 0xF004F; @@ -245,6 +244,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( uint8_t toggle_rate; uint8_t rate; + if (link->local_sink) + pre_disable_intercept_delay_ms = + link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; + /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING); @@ -595,10 +598,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68}; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint32_t pre_disable_intercept_delay_ms = - link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; - - + uint32_t pre_disable_intercept_delay_ms = 0; uint32_t vendor_lttpr_write_address = 0xF004F; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; @@ -607,6 +607,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( uint8_t toggle_rate; uint8_t rate; + if (link->local_sink) + pre_disable_intercept_delay_ms = + link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; + /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == 
DP_8b_10b_ENCODING); From 612c5ad50c3e5505e674b7cc50bf6527bc0adee6 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 16 Apr 2023 20:35:01 -0400 Subject: [PATCH 127/861] drm/amd/display: 3.2.233 This DC version brings along: - Improvements in the SubVP feature - Keep disable aux-i delay as 0 - Add p-state debugging and improvements - Fix in secure display context creation - add option to use custom backlight caps - Lowering min Z8 residency time - Restore rptr/wptr for DMCUB as workaround - Update FW feature caps struct Acked-by: Rodrigo Siqueira Signed-off-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index f52ab717ac7d..4424e7abb801 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.232" +#define DC_VER "3.2.233" #define MAX_SURFACES 3 #define MAX_PLANES 6 From f38129bb081758176dd78304faaee95007fb8838 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Tue, 21 Mar 2023 10:25:04 -0400 Subject: [PATCH 128/861] Revert "drm/amd/display: disable SubVP + DRR to prevent underflow" This reverts commit 80c6d6804f31451848a3956a70c2bcb1f07cfcb0. The original commit was intended as a workaround to prevent underflow and flickering when using one normal monitor and another high refresh rate monitor (> 120Hz). This patch is being reverted in favour of a software solution to enable SubVP+DRR. Signed-off-by: Aurabindo Pillai Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 ----- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 4 ---- drivers/gpu/drm/amd/include/amd_shared.h | 1 - 3 files changed, 10 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7f0dcd7af4c7..dd56e1512b0a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1646,11 +1646,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; - /* Disable SubVP + DRR config by default */ - init_data.flags.disable_subvp_drr = true; - if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR) - init_data.flags.disable_subvp_drr = false; - init_data.flags.seamless_boot_edp_requested = false; if (check_seamless_boot_capability(adev)) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index a73cacef6e48..826059d5b367 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -892,10 +892,6 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc int16_t stretched_drr_us = 0; int16_t drr_stretched_vblank_us = 0; int16_t max_vblank_mallregion = 0; - const struct dc_config *config = &dc->config; - - if (config->disable_subvp_drr) - return false; // Find SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) { diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index e4a22c68517d..f175e65b853a 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++
b/drivers/gpu/drm/amd/include/amd_shared.h @@ -240,7 +240,6 @@ enum DC_FEATURE_MASK { DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default - DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default }; enum DC_DEBUG_MASK { From 179661ad45cb18ba7702ebafde5f22aea47be5e0 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Fri, 21 Apr 2023 11:30:18 +0800 Subject: [PATCH 129/861] drm/amdgpu: support psp vbflash sysfs for MP0 13_0_10 Add support for PSP vbflash sysfs interface with MP0 version v13.0.10. Signed-off-by: Likun Gao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index a496bf2fb199..c58654a8b6c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -3618,6 +3618,7 @@ int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) switch (adev->ip_versions[MP0_HWIP][0]) { case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): + case IP_VERSION(13, 0, 10): if (!psp->adev) { psp->adev = adev; psp_v13_0_set_psp_funcs(psp); From b91075866e58e6b073689958f246834ad0b2c79c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 10 Apr 2023 12:02:29 -0400 Subject: [PATCH 130/861] drm/amdgpu/gfx11: update gpu_clock_counter logic This code was written prior to previous updates to this logic for other chips. The RSC registers are part of SMUIO which is an always on block so there is no need to disable gfxoff. Additionally add the carryover and preemption checks. v2: rebase Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 543af07ff102..8a4c4769e607 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -4714,24 +4714,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) uint64_t clock; uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); if (amdgpu_sriov_vf(adev)) { + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); if (clock_counter_hi_pre != clock_counter_hi_after) clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); } else { + preempt_disable(); clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); if (clock_counter_hi_pre != clock_counter_hi_after) clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + preempt_enable(); } clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); + return clock; } From 
e94c25567c8519b770985cc86b652b06dce807d2 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Thu, 20 Apr 2023 14:07:05 -0400 Subject: [PATCH 131/861] drm/amd/display: remove unused variables dispclk_delay_subtotal and dout clang with W=1 reports drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn314/display_rq_dlg_calc_314.c:1003:15: error: variable 'dispclk_delay_subtotal' set but not used [-Werror,-Wunused-but-set-variable] unsigned int dispclk_delay_subtotal; ^ This variable is not used, so remove it. Which made dout unused, so also remove. Signed-off-by: Tom Rix Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../display/dc/dml/dcn314/display_rq_dlg_calc_314.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index 4f945458b2b7..61ba3e33bb11 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -951,7 +951,6 @@ static void dml_rq_dlg_get_dlg_params( { const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src; const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest; - const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout; const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg; const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth; const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps; @@ -1000,8 +999,6 @@ static void dml_rq_dlg_get_dlg_params( unsigned int vupdate_width; unsigned int vready_offset; - unsigned int dispclk_delay_subtotal; - unsigned int vstartup_start; unsigned int dst_x_after_scaler; unsigned int dst_y_after_scaler; @@ -1119,13 +1116,6 @@ static void dml_rq_dlg_get_dlg_params( vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; - dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal; - - if (dout->dsc_enable) { - double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // FROM VBA - - dispclk_delay_subtotal += dsc_delay; - } vstartup_start = dst->vstartup_start; if (interlaced) { From 609d830048fb00d4fdea59fc9d17a8d63fbddb4a Mon Sep 17 00:00:00 2001 From: Sukrut Bellary Date: Sun, 23 Apr 2023 22:59:10 -0700 Subject: [PATCH 132/861] drm:amd:amdgpu: Fix missing bo unlock in failure path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit smatch warning - inconsistent handling of buffer object reserve and unreserve. 
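For context, a minimal sketch of the reserve/kmap/unreserve pattern the fix below restores, abridged from the KIQ resume path (surrounding logic elided): every exit taken after a successful reserve must unreserve, including the kmap failure path.

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj); /* the previously missing unlock */
		return r;
	}

	/* ... program the MQD through ring->mqd_ptr ... */

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);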
Reviewed-by: Christian König Signed-off-by: Sukrut Bellary Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 278416acf060..5de44d7e92de 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4686,8 +4686,10 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) return r; r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(ring->mqd_obj); return r; + } gfx_v8_0_kiq_init_queue(ring); amdgpu_bo_kunmap(ring->mqd_obj); From 960e27a5741cd3001996ff6ddfb3eb0ed3a4909d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 17 Apr 2023 23:35:08 +0200 Subject: [PATCH 133/861] drm/amd/display: Fix a test CalculatePrefetchSchedule() It is likely Height was expected here, instead of Width. Test the correct variable. Fixes: 17529ea2acfa ("drm/amd/display: Optimizations for DML math") Signed-off-by: Christophe JAILLET Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index b7c2844d0cbe..f294f2f8c75b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -810,7 +810,7 @@ static bool CalculatePrefetchSchedule( *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC; } else { *swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY; - if (myPipe->BlockWidth256BytesC > 0) + if (myPipe->BlockHeight256BytesC > 0) *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC; } From bafc31166aa7df5fa26ae0ad8196d1717e6cdea9 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 17 Apr 2023 23:41:11 +0200 Subject: [PATCH 134/861] drm/amd/display: Fix a test dml32_rq_dlg_get_rq_reg() It is likely p1_min_meta_chunk_bytes was expected here, instead of min_meta_chunk_bytes. Test the correct variable. 
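A short worked example (illustrative values) of the encoding both branches use: with p1_min_meta_chunk_bytes = 64, dml_log2(64) = 6, so the chroma field is written as 6 - 6 + 1 = 1. A chunk size of 0 has no logarithm and must take the special-cased 0 encoding, so gating the chroma register on min_meta_chunk_bytes (the luma value) could let a zero chroma chunk size reach dml_log2() in the else branch.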
Fixes: dda4fb85e433 ("drm/amd/display: DML changes for DCN32/321") Signed-off-by: Christophe JAILLET Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c index 395ae8761980..9ba6cb67655f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c @@ -116,7 +116,7 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs, else rq_regs->rq_regs_l.min_meta_chunk_size = dml_log2(min_meta_chunk_bytes) - 6 + 1; - if (min_meta_chunk_bytes == 0) + if (p1_min_meta_chunk_bytes == 0) rq_regs->rq_regs_c.min_meta_chunk_size = 0; else rq_regs->rq_regs_c.min_meta_chunk_size = dml_log2(p1_min_meta_chunk_bytes) - 6 + 1; From 489763af891d5dc35c0b64e18af284d6591286cf Mon Sep 17 00:00:00 2001 From: Pierre-Eric Pelloux-Prayer Date: Mon, 24 Apr 2023 19:25:32 +0200 Subject: [PATCH 135/861] drm/amdgpu: add new flag to AMDGPU_CTX_QUERY2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The OpenGL EXT_robustness extension expects the driver to stop reporting GUILTY_CONTEXT_RESET when the reset has completed and the GPU is ready to accept submission again. This commit adds an AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS flag that lets the UMD know that the reset has not finished yet. Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22290 Reviewed-by: Christian König Reviewed-by: André Almeida Signed-off-by: Pierre-Eric Pelloux-Prayer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- include/uapi/drm/amdgpu_drm.h | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index d2139ac12159..e1f642a3dc2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -576,6 +576,9 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, if (atomic_read(&ctx->guilty)) out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY; + if (amdgpu_in_reset(adev)) + out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS; + if (adev->ras_enabled && con) { /* Return the cached values in O(1), * and schedule delayed work to cache diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b987022e11b4..45544ebe576e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -111,9 +111,10 @@ * tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size, * gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi * 3.53.0 - Support for GFX11 CP GFX shadowing + * 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 53 +#define KMS_DRIVER_MINOR 54 #define KMS_DRIVER_PATCHLEVEL 0 unsigned int amdgpu_vram_limit = UINT_MAX; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index cc78528c3b4b..79b14828d542 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -245,6 +245,8 @@ union drm_amdgpu_bo_list { /* indicate some errors are detected by RAS */ #define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3) #define
AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4) +/* indicate that the reset hasn't completed yet */ +#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5) /* Context priority level */ #define AMDGPU_CTX_PRIORITY_UNSET -2048 From 535f77861084467eaf37bbbb4d4aaa96f5fe4c1c Mon Sep 17 00:00:00 2001 From: lyndonli Date: Sun, 23 Apr 2023 16:46:30 +0800 Subject: [PATCH 136/861] drm/amdgpu: Fix mode2 reset for sienna cichlid Before this change, sienna_cichlid_get_reset_handler will always return NULL, although the module parameter reset_method is 3 when loading amdgpu driver. Signed-off-by: lyndonli Signed-off-by: Yunxiang Li Reviewed-by: Feifei Xu Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c index 81a6d5b94987..8b8086d5c864 100644 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c +++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c @@ -40,7 +40,7 @@ static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_c adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev)) return true; #endif - return false; + return amdgpu_reset_method == AMD_RESET_METHOD_MODE2; } static struct amdgpu_reset_handler * From 59e9fff1983eaa9f226699f66f7d58da432dfb16 Mon Sep 17 00:00:00 2001 From: lyndonli Date: Sun, 23 Apr 2023 17:05:15 +0800 Subject: [PATCH 137/861] drm/amdgpu: Use the default reset when loading or reloading the driver Below call trace and errors are observed when reloading amdgpu driver with the module parameter reset_method=3. It should do a default reset when loading or reloading the driver, regardless of the module parameter reset_method. v2: add comments inside and modify commit messages. [ +2.180243] [drm] psp gfx command ID_LOAD_TOC(0x20) failed and response status is (0x0) [ +0.000011] [drm:psp_hw_start [amdgpu]] *ERROR* Failed to load toc [ +0.000890] [drm:psp_hw_start [amdgpu]] *ERROR* PSP tmr init failed! [ +0.020683] [drm:amdgpu_fill_buffer [amdgpu]] *ERROR* Trying to clear memory with ring turned off. [ +0.000003] RIP: 0010:amdgpu_bo_release_notify+0x1ef/0x210 [amdgpu] [ +0.000004] Call Trace: [ +0.000003] [ +0.000008] ttm_bo_release+0x2c4/0x330 [amdttm] [ +0.000026] amdttm_bo_put+0x3c/0x70 [amdttm] [ +0.000020] amdgpu_bo_free_kernel+0xe6/0x140 [amdgpu] [ +0.000728] psp_v11_0_ring_destroy+0x34/0x60 [amdgpu] [ +0.000826] psp_hw_init+0xe7/0x2f0 [amdgpu] [ +0.000813] amdgpu_device_fw_loading+0x1ad/0x2d0 [amdgpu] [ +0.000731] amdgpu_device_init.cold+0x108e/0x2002 [amdgpu] [ +0.001071] ? 
do_pci_enable_device+0xe1/0x110 [ +0.000011] amdgpu_driver_load_kms+0x1a/0x160 [amdgpu] [ +0.000729] amdgpu_pci_probe+0x179/0x3a0 [amdgpu] Signed-off-by: lyndonli Signed-off-by: Yunxiang Li Reviewed-by: Feifei Xu Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ac78caa7cba8..1787602fe582 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3579,6 +3579,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, int r, i; bool px = false; u32 max_MBps; + int tmp; adev->shutdown = false; adev->flags = flags; @@ -3800,7 +3801,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, } } } else { + tmp = amdgpu_reset_method; + /* It should do a default reset when loading or reloading the driver, + * regardless of the module parameter reset_method. + */ + amdgpu_reset_method = AMD_RESET_METHOD_NONE; r = amdgpu_asic_reset(adev); + amdgpu_reset_method = tmp; if (r) { dev_err(adev->dev, "asic reset on init failed\n"); goto failed; From 27e6be5d753687764c852af6468c21ef0624022a Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Sat, 22 Apr 2023 06:50:14 +0530 Subject: [PATCH 138/861] drm/amd/display: DSC passthrough is for DP-HDMI pcon (SST pcon) The if check around the DSC passthrough log is removed, as this is not an MST use case; it is for the DP-HDMI pcon use case. An SST pcon is detected as SST, not MST, so with the check in place the DSC passthrough log below would never be printed for an SST pcon. Fixes: 9b035d089086 ("drm/amd/display: Check & log if receiver supports MST, DSC & FEC.") Suggested-by: Fangzhi Zuo Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Fangzhi Zuo Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/link/protocols/link_dp_capability.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 84265dc66bba..577f74cc50fb 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1820,9 +1820,8 @@ static bool retrieve_link_cap(struct dc_link *link) str_yes_no(is_fec_supported)); DC_LOG_DC("%s: DSC_Basic_Sink_Support: %s\n", __func__, str_yes_no(is_dsc_basic_supported)); - if (link->dpcd_caps.is_mst_capable) - DC_LOG_DC("%s: DSC_Passthrough_Sink_Support: %s\n", __func__, - str_yes_no(is_dsc_passthrough_supported)); + DC_LOG_DC("%s: DSC_Passthrough_Sink_Support: %s\n", __func__, + str_yes_no(is_dsc_passthrough_supported)); } if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { status = core_link_read_dpcd( From b2edaac4f2fb4866c3f9b7be5c39f518fd497a3b Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 26 Apr 2023 00:15:26 +0530 Subject: [PATCH 139/861] drm/amd/amdgpu: Fix style errors in amdgpu_display.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following checkpatch errors in amdgpu_display.c: ERROR: spaces required around that '=' (ctx:VxW) ERROR: that open brace { should be on the previous line ERROR: else should follow close brace '}' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index d60fe7eb5579..b702f499f5fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -98,7 +98,7 @@ static void amdgpu_display_flip_callback(struct dma_fence *f, static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work, struct dma_fence **f) { - struct dma_fence *fence= *f; + struct dma_fence *fence = *f; if (fence == NULL) return false; @@ -1252,21 +1252,21 @@ const struct drm_mode_config_funcs amdgpu_mode_funcs = { .fb_create = amdgpu_display_user_framebuffer_create, }; -static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = -{ { UNDERSCAN_OFF, "off" }, +static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = { + { UNDERSCAN_OFF, "off" }, { UNDERSCAN_ON, "on" }, { UNDERSCAN_AUTO, "auto" }, }; -static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = -{ { AMDGPU_AUDIO_DISABLE, "off" }, +static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = { + { AMDGPU_AUDIO_DISABLE, "off" }, { AMDGPU_AUDIO_ENABLE, "on" }, { AMDGPU_AUDIO_AUTO, "auto" }, }; /* XXX support different dither options? spatial, temporal, both, etc. */ -static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = -{ { AMDGPU_FMT_DITHER_DISABLE, "off" }, +static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = { + { AMDGPU_FMT_DITHER_DISABLE, "off" }, { AMDGPU_FMT_DITHER_ENABLE, "on" }, }; @@ -1496,8 +1496,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, ret |= DRM_SCANOUTPOS_ACCURATE; vbl_start = vbl & 0x1fff; vbl_end = (vbl >> 16) & 0x1fff; - } - else { + } else { /* No: Fake something reasonable which gives at least ok results. */ vbl_start = mode->crtc_vdisplay; vbl_end = 0; From 1c312e816c4088d183c41b5d944f89775f3789bd Mon Sep 17 00:00:00 2001 From: Shane Xiao Date: Tue, 25 Apr 2023 22:39:08 +0800 Subject: [PATCH 140/861] drm/amdgpu: Enable doorbell selfring after resize FB BAR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why] The selfring doorbell aperture will change when the FB BAR is resized successfully during gmc sw init, so the sequence of enabling the doorbell selfring aperture needs to be reordered. [How] Move enable_doorbell_selfring_aperture from *_common_hw_init to *_common_late_init. This fixes a potential issue where the GPU rings its own doorbell when the device is in translated mode with the IOMMU on.
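In outline, the ordering the diffs below establish, shown as a sketch with the nv.c callbacks (soc15.c and soc21.c follow the same shape; error handling and the surrounding functions are elided):

	/* *_common_hw_init: enable only the plain doorbell aperture */
	adev->nbio.funcs->enable_doorbell_aperture(adev, true);

	/* *_common_late_init: gmc sw_init has run by now, so a resized
	 * doorbell BAR is final and the selfring aperture can be programmed
	 */
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);

	/* *_common_hw_fini: disable both; no delay is needed on teardown */
	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);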
v2: Remove *_enable_doorbell_aperture functions (Christian) v3: Add comments to note why we need to enable doorbell selfring late (Christian) Signed-off-by: Shane Xiao Signed-off-by: Aaron Liu Tested-by: Xiaomeng Hou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 23 +++++++++++++---------- drivers/gpu/drm/amd/amdgpu/soc15.c | 25 +++++++++++++++---------- drivers/gpu/drm/amd/amdgpu/soc21.c | 23 +++++++++++++---------- 3 files changed, 41 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index dabeeab2f2ad..3cc068974bcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -531,13 +531,6 @@ static void nv_program_aspm(struct amdgpu_device *adev) } -static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version nv_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, @@ -999,6 +992,11 @@ static int nv_common_late_init(void *handle) } } + /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init. + */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; } @@ -1038,7 +1036,7 @@ static int nv_common_hw_init(void *handle) if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ - nv_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true); return 0; } @@ -1047,8 +1045,13 @@ static int nv_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* disable the doorbell aperture */ - nv_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because nv_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. + */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4d1487a9836c..3221522e71e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -619,13 +619,6 @@ static void soc15_program_aspm(struct amdgpu_device *adev) adev->nbio.funcs->program_aspm(adev); } -static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version vega10_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, @@ -1125,6 +1118,11 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); + /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init.
+ */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; } @@ -1182,7 +1180,8 @@ static int soc15_common_hw_init(void *handle) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ - soc15_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true); + /* HW doorbell routing policy: doorbell writing not * in SDMA/IH/MM/ACV range will be routed to CP. So * we need to init SDMA doorbell range prior @@ -1198,8 +1197,14 @@ static int soc15_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* disable the doorbell aperture */ - soc15_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because soc15_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. + */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); + if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 7d59303ca2f9..0f82b8e83acb 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -450,13 +450,6 @@ static void soc21_program_aspm(struct amdgpu_device *adev) adev->nbio.funcs->program_aspm(adev); } -static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version soc21_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, @@ -764,6 +757,11 @@ static int soc21_common_late_init(void *handle) amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0); } + /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init. + */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; } @@ -797,7 +795,7 @@ static int soc21_common_hw_init(void *handle) if (adev->nbio.funcs->remap_hdp_registers) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ - soc21_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true); return 0; } @@ -806,8 +804,13 @@ static int soc21_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* disable the doorbell aperture */ - soc21_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because soc21_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. + */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_put_irq(adev); From 7e5b601008e781231be77bc8e1a84516f069983d Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Tue, 25 Apr 2023 13:16:32 +0800 Subject: [PATCH 141/861] drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v11_0_hw_fini The gmc.ecc_irq is enabled by firmware per IFWI setting, and the host driver is not privileged to enable/disable the interrupt. So, it is meaningless to use the amdgpu_irq_put function in gmc_v11_0_hw_fini, which also leads to the call trace. 
[ 102.980303] Call Trace: [ 102.980303] [ 102.980304] gmc_v11_0_hw_fini+0x54/0x90 [amdgpu] [ 102.980357] gmc_v11_0_suspend+0xe/0x20 [amdgpu] [ 102.980409] amdgpu_device_ip_suspend_phase2+0x240/0x460 [amdgpu] [ 102.980459] amdgpu_device_ip_suspend+0x3d/0x80 [amdgpu] [ 102.980520] amdgpu_device_pre_asic_reset+0xd9/0x490 [amdgpu] [ 102.980573] amdgpu_device_gpu_recover.cold+0x548/0xce6 [amdgpu] [ 102.980687] amdgpu_debugfs_reset_work+0x4c/0x70 [amdgpu] [ 102.980740] process_one_work+0x21f/0x3f0 [ 102.980741] worker_thread+0x200/0x3e0 [ 102.980742] ? process_one_work+0x3f0/0x3f0 [ 102.980743] kthread+0xfd/0x130 [ 102.980743] ? kthread_complete_and_exit+0x20/0x20 [ 102.980744] ret_from_fork+0x22/0x30 Signed-off-by: Horatio Zhang Reviewed-by: Hawking Zhang Reviewed-by: Guchun Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 3828ca95899f..f73c238f3145 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -951,7 +951,6 @@ static int gmc_v11_0_hw_fini(void *handle) return 0; } - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v11_0_gart_disable(adev); From c953cf040687992cfa066acca7d1c12e25fcec3e Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Tue, 25 Apr 2023 10:52:28 +0800 Subject: [PATCH 142/861] drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v10_0_hw_fini The gmc.ecc_irq is enabled by firmware per IFWI setting, and the host driver is not privileged to enable/disable the interrupt. So, it is meaningless to use the amdgpu_irq_put function in gmc_v10_0_hw_fini, which also leads to the call trace. [ 82.340264] Call Trace: [ 82.340265] [ 82.340269] gmc_v10_0_hw_fini+0x83/0xa0 [amdgpu] [ 82.340447] gmc_v10_0_suspend+0xe/0x20 [amdgpu] [ 82.340623] amdgpu_device_ip_suspend_phase2+0x127/0x1c0 [amdgpu] [ 82.340789] amdgpu_device_ip_suspend+0x3d/0x80 [amdgpu] [ 82.340955] amdgpu_device_pre_asic_reset+0xdd/0x2b0 [amdgpu] [ 82.341122] amdgpu_device_gpu_recover.cold+0x4dd/0xbb2 [amdgpu] [ 82.341359] amdgpu_debugfs_reset_work+0x4c/0x70 [amdgpu] [ 82.341529] process_one_work+0x21d/0x3f0 [ 82.341535] worker_thread+0x1fa/0x3c0 [ 82.341538] ? process_one_work+0x3f0/0x3f0 [ 82.341540] kthread+0xff/0x130 [ 82.341544] ? kthread_complete_and_exit+0x20/0x20 [ 82.341547] ret_from_fork+0x22/0x30 Signed-off-by: Horatio Zhang Reviewed-by: Hawking Zhang Reviewed-by: Guchun Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 23d4081eca00..5697b66bf0de 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -1143,7 +1143,6 @@ static int gmc_v10_0_hw_fini(void *handle) return 0; } - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); return 0; From 4e004146c0bab0c05d66dc648593e8b7ec3d8df5 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Wed, 26 Apr 2023 07:18:38 -0400 Subject: [PATCH 143/861] drm/amd/display: set variable custom_backlight_curve0 storage-class-specifier to static smatch reports drivers/gpu/drm/amd/amdgpu/../display/modules/power/power_helpers.c:119:31: warning: symbol 'custom_backlight_curve0' was not declared. Should it be static? 
This variable is only used in its defining file, so it should be static. Signed-off-by: Tom Rix Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 68d95b92df76..30349881a283 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -116,7 +116,7 @@ static const struct abm_parameters * const abm_settings[] = { abm_settings_config2, }; -const struct dm_bl_data_point custom_backlight_curve0[] = { +static const struct dm_bl_data_point custom_backlight_curve0[] = { {2, 14}, {4, 16}, {6, 18}, {8, 21}, {10, 23}, {12, 26}, {14, 29}, {16, 32}, {18, 35}, {20, 38}, {22, 41}, {24, 44}, {26, 48}, {28, 52}, {30, 55}, {32, 59}, {34, 62}, {36, 67}, {38, 71}, {40, 75}, {42, 80}, {44, 84}, {46, 88}, {48, 93}, {50, 98}, From b94f1cc93db72078ad2da02adf1818f5e9122cb7 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Thu, 20 Apr 2023 09:59:01 -0400 Subject: [PATCH 144/861] drm/amd/display: return status of dmub_srv_get_fw_boot_status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc with W=1 reports drivers/gpu/drm/amd/amdgpu/../display/dc/dc_dmub_srv.c: In function ‘dc_dmub_srv_optimized_init_done’: drivers/gpu/drm/amd/amdgpu/../display/dc/dc_dmub_srv.c:184:26: error: variable ‘dmub’ set but not used [-Werror=unused-but-set-variable] 184 | struct dmub_srv *dmub; | ^~~~ The return status is never set. It looks like a call to dmub_srv_get_fw_boot_status is missing. Fixes: 499e4b1c722e ("drm/amd/display: add mechanism to skip DCN init") Signed-off-by: Tom Rix Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index d15ec32243e2..36d936ab4300 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -182,14 +182,23 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub; - union dmub_fw_boot_status status; + struct dc_context *dc_ctx; + union dmub_fw_boot_status boot_status; + enum dmub_status status; if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; dmub = dc_dmub_srv->dmub; + dc_ctx = dc_dmub_srv->ctx; - return status.bits.optimized_init_done; + status = dmub_srv_get_fw_boot_status(dmub, &boot_status); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error querying DMUB boot status: error=%d\n", status); + return false; + } + + return boot_status.bits.optimized_init_done; } bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, From 7a4685cdfbdb94f1cf5ea2ddde824d94bf414708 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Wed, 26 Apr 2023 11:05:49 +0800 Subject: [PATCH 145/861] drm/amdgpu: fix a build warning by a typo in amdgpu_gfx.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This should be a typo introduced when adding multi-xx support.
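To make the indexing explicit, here is a minimal sketch (the helper name is hypothetical, not part of the patch) of the flattened scheme the fix restores:

static inline int compute_ring_index(struct amdgpu_device *adev, int i, int xcc_id)
{
	/* Rings for all XCCs live in one flat array, so the global index
	 * adds the per-XCC offset; the bare loop index i would always
	 * address XCC 0's rings and MQD backups.
	 */
	return i + xcc_id * adev->gfx.num_compute_rings;
}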
Reported-by: kernel test robot Signed-off-by: Guchun Chen Cc: Le Ma Reviewed-by: Le Ma Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 60bb4bba1994..2cf1f88fde48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -470,8 +470,8 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id) for (i = 0; i < adev->gfx.num_compute_rings; i++) { j = i + xcc_id * adev->gfx.num_compute_rings; - ring = &adev->gfx.compute_ring[i]; - kfree(adev->gfx.mec.mqd_backup[i]); + ring = &adev->gfx.compute_ring[j]; + kfree(adev->gfx.mec.mqd_backup[j]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, &ring->mqd_ptr); From 34305ac364dbee1b8e4e5307599c8b9afff0b01c Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Wed, 26 Apr 2023 11:27:46 +0800 Subject: [PATCH 146/861] drm/amdgpu: check correct allocated mqd_backup object after alloc Instead of the default one, check the right mqd_backup object. Signed-off-by: Guchun Chen Cc: Le Ma Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 2cf1f88fde48..66b9740ec376 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -379,7 +379,7 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, unsigned mqd_size, int xcc_id) { - int r, i; + int r, i, j; struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; struct amdgpu_ring *ring = &kiq->ring; @@ -431,7 +431,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, /* create MQD for each KCQ */ for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; + j = i + xcc_id * adev->gfx.num_compute_rings; + ring = &adev->gfx.compute_ring[j]; if (!ring->mqd_obj) { r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, @@ -443,8 +444,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[i + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.mec.mqd_backup[i]) + adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL); + if (!adev->gfx.mec.mqd_backup[j]) dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); } } From 3c4f6507abde5c36e892d63b25296fc6d9b13285 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Thu, 27 Apr 2023 09:21:05 +0800 Subject: [PATCH 147/861] drm/amdgpu: mark gfx_v9_4_3_disable_gpa_mode() static This was left global by accident, the corresponding functions for other hardware types are already static: drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:1072:6: error: no previous prototype for function 'gfx_v9_4_3_disable_gpa_mode' [-Werror,-Wmissing-prototypes] Fixes: 86301129698b ("drm/amdgpu: split gc v9_4_3 functionality from gc v9_0") Reported-by: kernel test robot Signed-off-by: Arnd Bergmann Reviewed-by: Guchun Chen Signed-off-by: Guchun Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 56a415e151d4..312491455382 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1075,7 +1075,7 @@ static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev, int xcc_id) } } -void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) +static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; From 1d74159021e9a4e58c422f0b91e2a6fcb884c54f Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 25 Apr 2023 14:11:56 -0400 Subject: [PATCH 148/861] drm/ttm: Helper function to get TTM mem limit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a helper function to get TTM memory limit. This is needed by KFD to set its own internal memory limits. Signed-off-by: Mukul Joshi Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_tt.c | 6 ++++++ include/drm/ttm/ttm_tt.h | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index ab725d9d14a6..feac2f2c736e 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -450,3 +450,9 @@ ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt, return &iter_tt->base; } EXPORT_SYMBOL(ttm_kmap_iter_tt_init); + +unsigned long ttm_tt_pages_limit(void) +{ + return ttm_pages_limit; +} +EXPORT_SYMBOL(ttm_tt_pages_limit); diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 977ca195a536..a4eff85b1f44 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -222,7 +222,7 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages); struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt, struct ttm_tt *tt); - +unsigned long ttm_tt_pages_limit(void); #if IS_ENABLED(CONFIG_AGP) #include From f1f6f48a338cdab96efef712dbef6b1e279583e2 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 25 Apr 2023 14:13:36 -0400 Subject: [PATCH 149/861] drm/amdgpu: Set GTT size equal to TTM mem limit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the helper function in TTM to get the TTM mem limit and set the GTT size to be equal to the TTM mem limit.
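As a rough sketch of the intent, assuming the ttm_tt_pages_limit() helper introduced above, the default GTT sizing reduces to:

	/* Cap the GTT domain at whatever TTM itself will allow, rather
	 * than a heuristic based on system RAM; the page count converts
	 * to bytes via PAGE_SHIFT.
	 */
	gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;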
Signed-off-by: Mukul Joshi Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 25 ++++++------------------- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 02b827785e39..957b18bda4a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -244,7 +244,6 @@ extern int amdgpu_sg_display; #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) -#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */ #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2cd081cbf706..f61f07575f63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1803,26 +1803,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); - /* Compute GTT size, either based on 1/2 the size of RAM size - * or whatever the user passed on module init */ - if (amdgpu_gtt_size == -1) { - struct sysinfo si; - - si_meminfo(&si); - /* Certain GL unit tests for large textures can cause problems - * with the OOM killer since there is no way to link this memory - * to a process. This was originally mitigated (but not necessarily - * eliminated) by limiting the GTT size. The problem is this limit - * is often too low for many modern games so just make the limit 1/2 - * of system memory which aligns with TTM. The OOM accounting needs - * to be addressed, but we shouldn't prevent common 3D applications - * from being usable just to potentially mitigate that corner case. - */ - gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - (u64)si.totalram * si.mem_unit / 2); - } else { + /* Compute GTT size, either based on TTM limit + * or whatever the user passed on module init. + */ + if (amdgpu_gtt_size == -1) + gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT; + else gtt_size = (uint64_t)amdgpu_gtt_size << 20; - } /* Initialize GTT memory pool */ r = amdgpu_gtt_mgr_init(adev, gtt_size); From 27fb73a0e3aa7478bcb5d2d59d65eec3c68fc165 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 25 Apr 2023 14:19:05 -0400 Subject: [PATCH 150/861] drm/amdkfd: Update KFD TTM mem limit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the helper function in TTM to get the TTM memory limit and set KFD's internal mem limit. This ensures that KFD's TTM mem limit and the actual TTM mem limit are exactly the same.
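Because the limit setup now also runs from per-device init rather than only module init (see the diff below), the initialization is guarded so it happens once; a minimal sketch of that guard:

	/* Already initialized by a previous device; keep the same limits. */
	if (kfd_mem_limit.max_system_mem_limit)
		return;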
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index fed8bb9a721f..a46285841d17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -53,7 +53,6 @@ int amdgpu_amdkfd_init(void) amdgpu_amdkfd_total_mem_size *= si.mem_unit; ret = kgd2kfd_init(); - amdgpu_amdkfd_gpuvm_init_mem_limits(); kfd_initialized = !ret; return ret; @@ -143,6 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) int i; int last_valid_bit; + amdgpu_amdkfd_gpuvm_init_mem_limits(); + if (adev->kfd.dev) { struct kgd2kfd_shared_resources gpu_resources = { .compute_vmid_bitmap = diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 4432e169fae8..c3990a5eb7c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -36,6 +36,7 @@ #include #include "amdgpu_xgmi.h" #include "kfd_smi_events.h" +#include /* Userptr restore delay, just long enough to allow consecutive VM * changes to accumulate @@ -110,13 +111,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) struct sysinfo si; uint64_t mem; + if (kfd_mem_limit.max_system_mem_limit) + return; + si_meminfo(&si); mem = si.freeram - si.freehigh; mem *= si.mem_unit; spin_lock_init(&kfd_mem_limit.mem_limit_lock); kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4); - kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); + kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT; pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", (kfd_mem_limit.max_system_mem_limit >> 20), (kfd_mem_limit.max_ttm_mem_limit >> 20)); From 514987a5bc3eb9bc86946f8013a9bdf732a29ef8 Mon Sep 17 00:00:00 2001 From: Chia-I Wu Date: Wed, 26 Apr 2023 15:54:55 -0700 Subject: [PATCH 151/861] drm/amdgpu: add a missing lock for AMDGPU_SCHED MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mgr->ctx_handles should be protected by mgr->lock. 
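In other words, the idr walk must run under the manager's mutex so a concurrent context create or destroy cannot mutate the tree mid-iteration; a sketch of the pattern the fix applies:

	mutex_lock(&mgr->lock);
	idr_for_each_entry(&mgr->ctx_handles, ctx, id)
		amdgpu_ctx_priority_override(ctx, priority);
	mutex_unlock(&mgr->lock);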
v2: improve commit message v3: add a Fixes tag Signed-off-by: Chia-I Wu Reviewed-by: Christian König Fixes: 52c6a62c64fa ("drm/amdgpu: add interface for editing a foreign process's priority v3") Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index e9b45089a28a..863b2a34b2d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -38,6 +38,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, { struct fd f = fdget(fd); struct amdgpu_fpriv *fpriv; + struct amdgpu_ctx_mgr *mgr; struct amdgpu_ctx *ctx; uint32_t id; int r; @@ -51,8 +52,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, return r; } - idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id) + mgr = &fpriv->ctx_mgr; + mutex_lock(&mgr->lock); + idr_for_each_entry(&mgr->ctx_handles, ctx, id) amdgpu_ctx_priority_override(ctx, priority); + mutex_unlock(&mgr->lock); fdput(f); return 0; From 64e2e71737acad867577deac92c1bec62e8b57d1 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Thu, 27 Apr 2023 16:58:50 +0800 Subject: [PATCH 152/861] drm/amdgpu: Add SDMA_UTCL1_WR_FIFO_SED field for sdma_v4_4_ras_field Query the sdma_utcl1_wr_fifo_sed field to detect UTCL1_WR_FIFO SED error counts. Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c index 6f9895cdddb1..0ddb6955a6d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c @@ -141,6 +141,10 @@ static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = { SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RDBST_FIFO_SED), 0, 0, }, + { "SDMA_UTCL1_WR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_WR_FIFO_SED), + 0, 0, + }, { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_DATA_LUT_FIFO_SED), 0, 0, From 9f58341d63bd26e93cca66e9e1ca850d3c40b0c3 Mon Sep 17 00:00:00 2001 From: Horace Chen Date: Tue, 25 Apr 2023 13:15:32 +0800 Subject: [PATCH 153/861] drm/amdgpu: disable SDMA WPTR_POLL_ENABLE for SR-IOV [Why] This WPTR_POLL_ENABLE is a hardware continuous polling which will cause FCLK and UCLK to stay at a high level. Most of its use cases can be covered by F32_WPTR_POLL_ENABLE, which polls by firmware.
So to save power, SR-IOV also needs to disable this bit. Signed-off-by: Horace Chen Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index eb722830531f..3d9a80511a45 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -510,10 +510,7 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - if (amdgpu_sriov_vf(adev)) - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1); - else - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); From a6f7baa387a32940e364978f72c1c150a29a219d Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Thu, 27 Apr 2023 20:12:31 +0530 Subject: [PATCH 154/861] drm/amd/amdgpu: Simplify switch case statements in amdgpu_connectors.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following checkpatch errors: ERROR: trailing statements should be on next line ERROR: space required after that ',' (ctx:VxV) ERROR: code indent should use tabs where possible Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_connectors.c | 39 ++++++++++++++----- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 6be30dcb029d..d34037b85cf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -593,11 +593,20 @@ static int amdgpu_connector_set_property(struct drm_connector *connector, switch (val) { default: - case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; - case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; - case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; - case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; + case DRM_MODE_SCALE_NONE: + rmx_type = RMX_OFF; + break; + case DRM_MODE_SCALE_CENTER: + rmx_type = RMX_CENTER; + break; + case DRM_MODE_SCALE_ASPECT: + rmx_type = RMX_ASPECT; + break; + case DRM_MODE_SCALE_FULLSCREEN: + rmx_type = RMX_FULL; + break; } + if (amdgpu_encoder->rmx_type == rmx_type) return 0; @@ -799,12 +808,21 @@ static int amdgpu_connector_set_lcd_property(struct drm_connector *connector, } switch (value) { - case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; - case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; - case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; + case DRM_MODE_SCALE_NONE: + rmx_type = RMX_OFF; + break; + case DRM_MODE_SCALE_CENTER: + rmx_type = RMX_CENTER; + break; + case DRM_MODE_SCALE_ASPECT: + rmx_type = RMX_ASPECT; + break; default: - case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; + case DRM_MODE_SCALE_FULLSCREEN: + rmx_type = RMX_FULL; + break; } + if (amdgpu_encoder->rmx_type == rmx_type) return 0; @@ -1127,7 +1145,8 @@ amdgpu_connector_dvi_detect(struct drm_connector
*connector, bool force) /* assume digital unless load detected otherwise */ amdgpu_connector->use_digital = true; lret = encoder_funcs->detect(encoder, connector); - DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret); + DRM_DEBUG_KMS("load_detect %x returned: %x\n", + encoder->encoder_type, lret); if (lret == connector_status_connected) amdgpu_connector->use_digital = false; } @@ -1991,7 +2010,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) { if (i2c_bus->valid) { connector->polled = DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT; + DRM_CONNECTOR_POLL_DISCONNECT; } } else connector->polled = DRM_CONNECTOR_POLL_HPD; From 04b3c34f5cb2994a5e466659ea4aa962c0fc09a6 Mon Sep 17 00:00:00 2001 From: Xiaomeng Hou Date: Thu, 27 Apr 2023 13:38:47 +0800 Subject: [PATCH 155/861] drm/amdgpu: remove pasid_src field from IV entry PASID_SRC is not actually present in the Interrupt Packet; the field is treated as reserved bits now. So remove it from the IV entry to avoid misuse. Signed-off-by: Xiaomeng Hou Reviewed-by: Aaron Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index d58353c89e59..fceb3b384955 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -271,7 +271,6 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev, entry->timestamp_src = dw[2] >> 31; entry->pasid = dw[3] & 0xffff; entry->node_id = (dw[3] >> 16) & 0xff; - entry->pasid_src = dw[3] >> 31; entry->src_data[0] = dw[4]; entry->src_data[1] = dw[5]; entry->src_data[2] = dw[6]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 7a8e686bdd41..1c747ac4129a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -54,7 +54,6 @@ struct amdgpu_iv_entry { unsigned timestamp_src; unsigned pasid; unsigned node_id; - unsigned pasid_src; unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW]; const uint32_t *iv_entry; }; From d446127107e8185c2dc750cd4d0c5ff697a694f6 Mon Sep 17 00:00:00 2001 From: YuBiao Wang Date: Fri, 28 Apr 2023 14:00:40 +0800 Subject: [PATCH 156/861] drm/amdgpu: Enable mcbp under sriov by default Enable mcbp under sriov by default. ASICs with soc21 support mcbp now, so we should enable it. Signed-off-by: YuBiao Wang Reviewed-by: Horace Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 9dd474262c29..1311e42ab8e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -65,8 +65,8 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) adev->cg_flags = 0; adev->pg_flags = 0; - /* enable mcbp for sriov asic_type before soc21 */ - amdgpu_mcbp = (adev->asic_type < CHIP_IP_DISCOVERY) ?
1 : 0; + /* enable mcbp for sriov */ + amdgpu_mcbp = 1; } From 8fa76350587b6deb8a95d83f9cb23ce7599587b5 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 28 Apr 2023 16:04:59 +0530 Subject: [PATCH 157/861] drm/amd/amdgpu: Fix style problems in amdgpu_debugfs.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following issues reported by checkpatch: WARNING: please, no space before tabs WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: sizeof *rd should be sizeof(*rd) WARNING: Missing a blank line after declarations WARNING: sizeof rd->id should be sizeof(rd->id) WARNING: static const char * array should probably be static const char * const WARNING: Symbolic permissions 'S_IRUGO' are not preferred. Consider using octal permissions '0444'. WARNING: Prefer seq_puts to seq_printf ERROR: space prohibited after that open parenthesis '(' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 28 +++++++++++---------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index cc64ae550bc1..df94cd2c4b39 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -56,14 +56,14 @@ * * Bit 62: Indicates a GRBM bank switch is needed * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is - * zero) + * zero) * Bits 24..33: The SE or ME selector if needed * Bits 34..43: The SH (or SA) or PIPE selector if needed * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed * * Bit 23: Indicates that the PM power gating lock should be held - * This is necessary to read registers that might be - * unreliable during a power gating transistion. + * This is necessary to read registers that might be + * unreliable during a power gating transistion. * * The lower bits are the BYTE offset of the register to read. This * allows reading multiple registers in a single call and having @@ -76,7 +76,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, ssize_t result = 0; int r; bool pm_pg_lock, use_bank, use_ring; - unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid; + unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid; pm_pg_lock = use_bank = use_ring = false; instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0; @@ -208,7 +208,7 @@ static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) { struct amdgpu_debugfs_regs2_data *rd; - rd = kzalloc(sizeof *rd, GFP_KERNEL); + rd = kzalloc(sizeof(*rd), GFP_KERNEL); if (!rd) return -ENOMEM; rd->adev = file_inode(file)->i_private; @@ -221,6 +221,7 @@ static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file) { struct amdgpu_debugfs_regs2_data *rd = file->private_data; + mutex_destroy(&rd->lock); kfree(file->private_data); return 0; @@ -324,7 +325,8 @@ static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigne switch (cmd) { case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: mutex_lock(&rd->lock); - r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id); + r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, + sizeof(rd->id)); mutex_unlock(&rd->lock); return r ? 
-EINVAL : 0; default: @@ -863,7 +865,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, * The offset being sought changes which wave that the status data * will be returned for. The bits are used as follows: * - * Bits 0..6: Byte offset into data + * Bits 0..6: Byte offset into data Bits 7..14: SE selector Bits 15..22: SH/SA selector Bits 23..30: CU/{WGP+SIMD} selector @@ -1429,7 +1431,7 @@ static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_gfxoff_residency_fops, }; -static const char *debugfs_regs_names[] = { +static const char * const debugfs_regs_names[] = { "amdgpu_regs", "amdgpu_regs2", "amdgpu_regs_didt", @@ -1447,7 +1449,7 @@ static const char *debugfs_regs_names[] = { /** * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide - * register access. + * register access. * * @adev: The device to attach the debugfs entries to */ @@ -1459,7 +1461,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { ent = debugfs_create_file(debugfs_regs_names[i], - S_IFREG | S_IRUGO, root, + S_IFREG | 0444, root, adev, debugfs_regs[i]); if (!i && !IS_ERR_OR_NULL(ent)) i_size_write(ent->d_inode, adev->rmmio_size); @@ -1494,12 +1496,12 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) kthread_park(ring->sched.thread); } - seq_printf(m, "run ib test:\n"); + seq_puts(m, "run ib test:\n"); r = amdgpu_ib_ring_tests(adev); if (r) seq_printf(m, "ib ring tests failed (%d).\n", r); else - seq_printf(m, "ib ring tests passed.\n"); + seq_puts(m, "ib ring tests passed.\n"); /* go on the scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { @@ -1978,7 +1980,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) amdgpu_debugfs_ring_init(adev, ring); } - for ( i = 0; i < adev->vcn.num_vcn_inst; i++) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { if (!amdgpu_vcnfw_log) break; From 44407010ce7f524e4f68aa25f9250a9cf9e4410c Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Wed, 26 Apr 2023 12:24:12 -0700 Subject: [PATCH 158/861] drm/amd/display: Fix possible NULL dereference in dc_dmub_srv_cmd_run_list() We have a NULL check for 'dc_dmub_srv' in dc_dmub_srv_cmd_run_list(), but we are dereferencing it before checking. Fix this by moving the dereference to after the NULL check. This issue was found with Smatch (static analysis tool).
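Reduced to a sketch, the broken shape looks like this:

	struct dc_context *dc_ctx = dc_dmub_srv->ctx;	/* dereference happens here ... */

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)		/* ... so this NULL check is too late */
		return false;

Moving the assignment below the check closes the window in which a NULL dc_dmub_srv would be dereferenced.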
Fixes: e97cc04fe0fb ("drm/amd/display: refactor dmub commands into single function") Signed-off-by: Harshit Mogalapalli Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 36d936ab4300..0319a30f2d5c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -125,7 +125,7 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) { - struct dc_context *dc_ctx = dc_dmub_srv->ctx; + struct dc_context *dc_ctx; struct dmub_srv *dmub; enum dmub_status status; int i; @@ -133,6 +133,7 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; + dc_ctx = dc_dmub_srv->ctx; dmub = dc_dmub_srv->dmub; for (i = 0 ; i < count; i++) { From 9420a034060a094874091cb6c6257268f08a1999 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Tue, 25 Apr 2023 08:34:17 +0530 Subject: [PATCH 159/861] drm/amd/display: Add logging for eDP v1.4 supported sink rates Include the eDP v1.4 panel's supported sink rates in the debug output; this is useful info for knowing the optimized link rates. Cc: Aurabindo Pillai Cc: Jerry Zuo Reviewed-by: Rodrigo Siqueira Signed-off-by: Srinivasan Shanmugam Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 577f74cc50fb..f46864630506 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1951,6 +1951,9 @@ void detect_edp_sink_caps(struct dc_link *link) link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + supported_link_rates[entry]) * 200; + DC_LOG_DC("%s: eDP v1.4 supported sink rates: [%d] %d kHz\n", __func__, + entry / 2, link_rate_in_khz); + if (link_rate_in_khz != 0) { link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; From 435af0b919bf9eb78f4e05e8596ebed9ca7885b7 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Mon, 24 Apr 2023 14:46:16 -0400 Subject: [PATCH 160/861] drm/amdkfd: Optimize svm range map to GPU with XNACK on With XNACK on, if svm_range_set_attr sets the range access or access_in_place attribute, we don't call svm_range_validate_and_map to update the GPU mapping. This avoids prefaulting the range pages into system memory if the range is not prefetched to VRAM and not mapped to GPUs.
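A sketch of the changed condition: with XNACK enabled the retry-fault path maps pages on demand, so an eager validate-and-map is only forced when XNACK is off:

	case KFD_IOCTL_SVM_ATTR_ACCESS:
		/* Only XNACK-off processes need the mapping updated eagerly. */
		if (!p->xnack_enabled)
			*update_mapping = true;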
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 96a138a39515..c02430537e9c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -735,7 +735,9 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange, case KFD_IOCTL_SVM_ATTR_ACCESS: case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: case KFD_IOCTL_SVM_ATTR_NO_ACCESS: - *update_mapping = true; + if (!p->xnack_enabled) + *update_mapping = true; + gpuidx = kfd_process_gpuidx_from_gpuid(p, attrs[i].value); if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) { From 6ae869b9b62534eaf04f838048338bf150bc6ce4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 24 Apr 2023 12:50:30 -0400 Subject: [PATCH 161/861] drm/amdgpu/gfx11: drop old bring up code No longer used. Remove it. Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 70 ++------------------------ 1 file changed, 3 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 8a4c4769e607..646003b2faf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -3680,55 +3680,6 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, return 0; } -#ifdef BRING_UP_DEBUG -static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct v11_gfx_mqd *mqd = ring->mqd_ptr; - - /* set mmCP_GFX_HQD_WPTR/_HI to 0 */ - WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr); - WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi); - - /* set GFX_MQD_BASE */ - WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr); - WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); - - /* set GFX_MQD_CONTROL */ - WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control); - - /* set GFX_HQD_VMID to 0 */ - WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid); - - WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY, - mqd->cp_gfx_hqd_queue_priority); - WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum); - - /* set GFX_HQD_BASE, similar as CP_RB_BASE */ - WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base); - WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi); - - /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */ - WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr); - WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi); - - /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */ - WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl); - - /* set RB_WPTR_POLL_ADDR */ - WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo); - WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi); - - /* set RB_DOORBELL_CONTROL */ - WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control); - - /* active the queue */ - WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active); - - return 0; -} -#endif - static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3740,9 +3691,6 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) 
mutex_lock(&adev->srbm_mutex); soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); amdgpu_ring_init_mqd(ring); -#ifdef BRING_UP_DEBUG - gfx_v11_0_gfx_queue_init_register(ring); -#endif soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.me.mqd_backup[mqd_idx]) @@ -3755,13 +3703,6 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) ring->wptr = 0; *ring->wptr_cpu_addr = 0; amdgpu_ring_clear_ring(ring); -#ifdef BRING_UP_DEBUG - mutex_lock(&adev->srbm_mutex); - soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - gfx_v11_0_gfx_queue_init_register(ring); - soc21_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -#endif } else { amdgpu_ring_clear_ring(ring); } @@ -3769,7 +3710,6 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) return 0; } -#ifndef BRING_UP_DEBUG static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) { struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -3791,7 +3731,6 @@ static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) return amdgpu_ring_test_helper(kiq_ring); } -#endif static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) { @@ -3815,11 +3754,11 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) if (r) goto done; } -#ifndef BRING_UP_DEBUG + r = gfx_v11_0_kiq_enable_kgq(adev); if (r) goto done; -#endif + r = gfx_v11_0_cp_gfx_start(adev); if (r) goto done; @@ -4453,7 +4392,6 @@ static int gfx_v11_0_hw_init(void *handle) return r; } -#ifndef BRING_UP_DEBUG static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) { struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -4476,7 +4414,6 @@ static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) return r; } -#endif static int gfx_v11_0_hw_fini(void *handle) { @@ -4488,13 +4425,12 @@ static int gfx_v11_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); if (!adev->no_hw_access) { -#ifndef BRING_UP_DEBUG if (amdgpu_async_gfx_ring) { r = gfx_v11_0_kiq_disable_kgq(adev); if (r) DRM_ERROR("KGQ disable failed\n"); } -#endif + if (amdgpu_gfx_disable_kcq(adev, 0)) DRM_ERROR("KCQ disable failed\n"); From edacf33357b8576db3198e4ae41bb7e6cd41ce4e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 24 Apr 2023 12:52:31 -0400 Subject: [PATCH 162/861] drm/amdgpu/gfx10: drop old bring up code No longer used. Remove it. 
Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 70 ++------------------------ 1 file changed, 3 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 8bd07ff59671..25be4485dcd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6423,55 +6423,6 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, return 0; } -#ifdef BRING_UP_DEBUG -static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct v10_gfx_mqd *mqd = ring->mqd_ptr; - - /* set mmCP_GFX_HQD_WPTR/_HI to 0 */ - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr); - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi); - - /* set GFX_MQD_BASE */ - WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr); - WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); - - /* set GFX_MQD_CONTROL */ - WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control); - - /* set GFX_HQD_VMID to 0 */ - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid); - - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY, - mqd->cp_gfx_hqd_queue_priority); - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum); - - /* set GFX_HQD_BASE, similar as CP_RB_BASE */ - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base); - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi); - - /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */ - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr); - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi); - - /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */ - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl); - - /* set RB_WPTR_POLL_ADDR */ - WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo); - WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi); - - /* set RB_DOORBELL_CONTROL */ - WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control); - - /* active the queue */ - WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active); - - return 0; -} -#endif - static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -6492,9 +6443,6 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1) gfx_v10_0_cp_gfx_set_doorbell(adev, ring); -#ifdef BRING_UP_DEBUG - gfx_v10_0_gfx_queue_init_register(ring); -#endif nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.me.mqd_backup[mqd_idx]) @@ -6507,13 +6455,6 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) ring->wptr = 0; *ring->wptr_cpu_addr = 0; amdgpu_ring_clear_ring(ring); -#ifdef BRING_UP_DEBUG - mutex_lock(&adev->srbm_mutex); - nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - gfx_v10_0_gfx_queue_init_register(ring); - nv_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -#endif } else { amdgpu_ring_clear_ring(ring); } @@ -6521,7 +6462,6 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) return 0; } -#ifndef BRING_UP_DEBUG static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev) { struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -6543,7 +6483,6 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev) 
return amdgpu_ring_test_helper(kiq_ring); } -#endif static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) { @@ -6567,11 +6506,11 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) if (r) goto done; } -#ifndef BRING_UP_DEBUG + r = gfx_v10_0_kiq_enable_kgq(adev); if (r) goto done; -#endif + r = gfx_v10_0_cp_gfx_start(adev); if (r) goto done; @@ -7239,7 +7178,6 @@ static int gfx_v10_0_hw_init(void *handle) return r; } -#ifndef BRING_UP_DEBUG static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev) { struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -7261,7 +7199,6 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev) else return 0; } -#endif static int gfx_v10_0_hw_fini(void *handle) { @@ -7272,13 +7209,12 @@ static int gfx_v10_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); if (!adev->no_hw_access) { -#ifndef BRING_UP_DEBUG if (amdgpu_async_gfx_ring) { r = gfx_v10_0_kiq_disable_kgq(adev); if (r) DRM_ERROR("KGQ disable failed\n"); } -#endif + if (amdgpu_gfx_disable_kcq(adev, 0)) DRM_ERROR("KCQ disable failed\n"); } From f14c8c3e1fc9e10c6d54999a96acb2b5087374df Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 28 Apr 2023 17:22:20 +0530 Subject: [PATCH 163/861] drm/amd/amdgpu: Fix style problems in amdgpu_psp.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following checkpatch warnings & error in amdgpu_psp.c WARNING: Comparisons should place the constant on the right side of the test WARNING: braces {} are not necessary for single statement blocks WARNING: please, no space before tabs WARNING: braces {} are not necessary for single statement blocks ERROR: that open brace { should be on the previous line Suggested-by: Christian König Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 51 ++++++++++--------------- 1 file changed, 20 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c58654a8b6c5..aa37b703c718 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -411,7 +411,7 @@ static int psp_sw_init(void *handle) if ((psp_get_runtime_db_entry(adev, PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS, &scpm_entry)) && - (SCPM_DISABLE != scpm_entry.scpm_status)) { + (scpm_entry.scpm_status != SCPM_DISABLE)) { adev->scpm_enabled = true; adev->scpm_status = scpm_entry.scpm_status; } else { @@ -458,10 +458,9 @@ static int psp_sw_init(void *handle) if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) { - ret= psp_sysfs_init(adev); - if (ret) { + ret = psp_sysfs_init(adev); + if (ret) return ret; - } } ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, @@ -645,7 +644,7 @@ psp_cmd_submit_buf(struct psp_context *psp, skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); - memcpy((void*)&cmd->resp, (void*)&psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); + memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); /* In some cases, psp response status is not 0 even there is no * problem while the command is submitted. 
Some version of PSP FW @@ -830,7 +829,7 @@ static int psp_tmr_load(struct psp_context *psp) } static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, - struct psp_gfx_cmd_resp *cmd) + struct psp_gfx_cmd_resp *cmd) { if (amdgpu_sriov_vf(psp->adev)) cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; @@ -1067,7 +1066,7 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, struct ta_context *context) { cmd->cmd_id = context->ta_load_type; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; @@ -1138,9 +1137,8 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context) context->resp_status = cmd->resp.status; - if (!ret) { + if (!ret) context->session_id = cmd->resp.session_id; - } release_psp_cmd_buf(psp); @@ -1467,8 +1465,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) if (amdgpu_ras_intr_triggered()) return ret; - if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) - { + if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { DRM_WARN("RAS: Unsupported Interface"); return -EINVAL; } @@ -1478,8 +1475,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) dev_warn(psp->adev->dev, "ECC switch disabled\n"); ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; - } - else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) + } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) dev_warn(psp->adev->dev, "RAS internal register access blocked\n"); @@ -1575,11 +1571,10 @@ int psp_ras_initialize(struct psp_context *psp) if (ret) dev_warn(adev->dev, "PSP set boot config failed\n"); else - dev_warn(adev->dev, "GECC will be disabled in next boot cycle " - "if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); + dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); } } else { - if (1 == boot_cfg) { + if (boot_cfg == 1) { dev_info(adev->dev, "GECC is enabled\n"); } else { /* enable GECC in next boot cycle if it is disabled @@ -2365,7 +2360,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, } static int psp_execute_non_psp_fw_load(struct psp_context *psp, - struct amdgpu_firmware_info *ucode) + struct amdgpu_firmware_info *ucode) { int ret = 0; struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); @@ -2404,9 +2399,8 @@ static int psp_load_smu_fw(struct psp_context *psp) (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) { ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); - if (ret) { + if (ret) DRM_WARN("Failed to set MP1 state prepare for reload\n"); - } } ret = psp_execute_non_psp_fw_load(psp, ucode); @@ -2716,9 +2710,8 @@ static int psp_suspend(void *handle) } ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); - if (ret) { + if (ret) DRM_ERROR("PSP ring stop failed\n"); - } out: return ret; @@ -2967,7 +2960,7 @@ static int parse_sos_bin_descriptor(struct psp_context *psp, psp->sos.fw_version = le32_to_cpu(desc->fw_version); psp->sos.feature_version = le32_to_cpu(desc->fw_version); psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); - psp->sos.start_addr = ucode_start_addr; + psp->sos.start_addr = ucode_start_addr; break; case PSP_FW_TYPE_PSP_SYS_DRV: psp->sys.fw_version = le32_to_cpu(desc->fw_version); @@ -3491,7 +3484,7 @@ void psp_copy_fw(struct 
psp_context *psp, uint8_t *start_addr, uint32_t bin_size drm_dev_exit(idx); } -static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, +static DEVICE_ATTR(usbc_pd_fw, 0644, psp_usbc_pd_fw_sysfs_read, psp_usbc_pd_fw_sysfs_write); @@ -3674,8 +3667,7 @@ static void psp_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); } -const struct amdgpu_ip_block_version psp_v3_1_ip_block = -{ +const struct amdgpu_ip_block_version psp_v3_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_PSP, .major = 3, .minor = 1, @@ -3683,8 +3675,7 @@ const struct amdgpu_ip_block_version psp_v3_1_ip_block = .funcs = &psp_ip_funcs, }; -const struct amdgpu_ip_block_version psp_v10_0_ip_block = -{ +const struct amdgpu_ip_block_version psp_v10_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_PSP, .major = 10, .minor = 0, @@ -3692,8 +3683,7 @@ const struct amdgpu_ip_block_version psp_v10_0_ip_block = .funcs = &psp_ip_funcs, }; -const struct amdgpu_ip_block_version psp_v11_0_ip_block = -{ +const struct amdgpu_ip_block_version psp_v11_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_PSP, .major = 11, .minor = 0, @@ -3709,8 +3699,7 @@ const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { .funcs = &psp_ip_funcs, }; -const struct amdgpu_ip_block_version psp_v12_0_ip_block = -{ +const struct amdgpu_ip_block_version psp_v12_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_PSP, .major = 12, .minor = 0, From 9ab367f838a692843aa21b2ff43ebfc3bafa7175 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 19 Apr 2023 17:30:39 +0530 Subject: [PATCH 164/861] drm/amd/display: Add logging when DP link training Channel EQ is Successful Log when Channel Equalization is successful. Cc: Aurabindo Pillai Cc: Fangzhi Zuo Reviewed-by: Rodrigo Siqueira Signed-off-by: Srinivasan Shanmugam Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- .../dc/link/protocols/link_dp_training_128b_132b.c | 10 ++++++++-- .../dc/link/protocols/link_dp_training_8b_10b.c | 4 ++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c index 23d380f09a21..db87cfe37b5c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c @@ -211,11 +211,17 @@ enum link_training_result dp_perform_128b_132b_link_training( dpcd_set_link_settings(link, lt_settings); - if (result == LINK_TRAINING_SUCCESS) + if (result == LINK_TRAINING_SUCCESS) { result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); + if (result == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); + } - if (result == LINK_TRAINING_SUCCESS) + if (result == LINK_TRAINING_SUCCESS) { result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); + if (result == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: CDS done.\n", __func__); + } return result; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c index 3889ebb2256b..2b4c15b0b407 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -388,6 +388,8 @@ enum link_training_result dp_perform_8b_10b_link_training( link_res, lt_settings, repeater_id); + if (status == 
LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); repeater_training_done(link, repeater_id); @@ -409,6 +411,8 @@ enum link_training_result dp_perform_8b_10b_link_training( link_res, lt_settings, DPRX); + if (status == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); } } From 5b7bee5b445785a47b781e889e51219ec35d4407 Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Fri, 14 Apr 2023 12:46:48 -0400 Subject: [PATCH 165/861] drm/amd/display: Workaround wrong HDR colorimetry with some receivers [Why] Some scalers do not pick up color space updates unless the DP link is disabled/re-enabled which can result in incorrect/washed out HDR colors in some cases. [How] Call set_dpms_on to disable the link, re-train and re-enable with the updated output color space. Reviewed-by: Aric Cyr Acked-by: Alan Liu Signed-off-by: Ilya Bakoulin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 7 +++++++ drivers/gpu/drm/amd/display/dc/dc.h | 1 + 2 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 89a245e3c9ac..965b348dc8f1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3268,6 +3268,13 @@ static void commit_planes_do_stream_update(struct dc *dc, dc->hwss.prepare_bandwidth(dc, dc->current_state); dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); } + } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space + && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { + /* + * Workaround for firmware issue in some receivers where they don't pick up + * correct output color space unless DP link is disabled/re-enabled + */ + dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4424e7abb801..892e3adb99d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1506,6 +1506,7 @@ struct dc_link { /* Forced DPIA into TBT3 compatibility mode. */ bool dpia_forced_tbt3_mode; bool dongle_mode_timing_override; + bool blank_stream_on_ocs_change; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; From 9749a42db74c3400e0526d9a39fa0324abfd0d66 Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Thu, 13 Apr 2023 17:34:24 -0400 Subject: [PATCH 166/861] drm/amd/display: Change default Z8 watermark values [Why & How] Previous Z8 watermark values were causing flickering and OTC underflow. Updating Z8 watermark values based on the measurement. 
Reviewed-by: Nicholas Kazlauskas Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Acked-by: Alan Liu Signed-off-by: Leo Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 19370b872a91..1d00eb9e73c6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -149,8 +149,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = { .num_states = 5, .sr_exit_time_us = 16.5, .sr_enter_plus_exit_time_us = 18.5, - .sr_exit_z8_time_us = 210.0, - .sr_enter_plus_exit_z8_time_us = 310.0, + .sr_exit_z8_time_us = 268.0, + .sr_enter_plus_exit_z8_time_us = 393.0, .writeback_latency_us = 12.0, .dram_channel_width_bytes = 4, .round_trip_ping_latency_dcfclk_cycles = 106, From 4dfa60aede755814dcfbc9a05008265d827cc98d Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Thu, 13 Apr 2023 17:37:49 -0400 Subject: [PATCH 167/861] drm/amd/display: Adding debug option to override Z8 watermark values [Why & How] Adding debug options to override Z8 watermark values for testing purposes. Reviewed-by: Nicholas Kazlauskas Acked-by: Alan Liu Signed-off-by: Leo Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 4 ++++ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 11 +++++++++++ 2 files changed, 15 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 892e3adb99d9..fea68383bb61 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -700,6 +700,8 @@ struct dc_virtual_addr_space_config { struct dc_bounding_box_overrides { int sr_exit_time_ns; int sr_enter_plus_exit_time_ns; + int sr_exit_z8_time_ns; + int sr_enter_plus_exit_z8_time_ns; int urgent_latency_ns; int percent_of_ideal_drambw; int dram_clock_change_latency_ns; @@ -769,6 +771,8 @@ struct dc_debug_options { int sr_enter_plus_exit_time_dpm0_ns; int sr_exit_time_ns; int sr_enter_plus_exit_time_ns; + int sr_exit_z8_time_ns; + int sr_enter_plus_exit_z8_time_ns; int urgent_latency_ns; uint32_t underflow_assert_delay_us; int percent_of_ideal_drambw; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index a5b1e4bb1a22..3407f9a2c6a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1890,6 +1890,17 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; } + if ((int)(bb->sr_exit_z8_time_us * 1000) + != dc->bb_overrides.sr_exit_z8_time_ns + && dc->bb_overrides.sr_exit_z8_time_ns) { + bb->sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; + } + + if ((int)(bb->sr_enter_plus_exit_z8_time_us * 1000) + != dc->bb_overrides.sr_enter_plus_exit_z8_time_ns + && dc->bb_overrides.sr_enter_plus_exit_z8_time_ns) { + bb->sr_enter_plus_exit_z8_time_us = dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; + } if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns && dc->bb_overrides.urgent_latency_ns) { bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; From f835a571ec998b83c165022795f9385f9335f108 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Wed, 19 Apr 2023 18:17:14 
-0400
Subject: [PATCH 168/861] drm/amd/display: filter out invalid bits in pipe_fuses

[Why]
Reading pipe_fuses from the register may have invalid bits set, which may
affect num_pipes erroneously.

[How]
Add a read_pipe_fuses() call and filter bits based on the expected number
of pipes.

Reviewed-by: Alvin Lee
Acked-by: Alan Liu
Signed-off-by: Samson Tam
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 10 +++++++++-
 .../gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 10 +++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 7feeba78c0c9..a49323885874 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -2082,6 +2082,14 @@ static struct resource_funcs dcn32_res_pool_funcs = {
 	.restore_mall_state = dcn32_restore_mall_state,
 };

+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+	/* DCN32 support max 4 pipes */
+	value = value & 0xf;
+	return value;
+}
+
 static bool dcn32_resource_construct(
 	uint8_t num_virtual_links,
@@ -2125,7 +2133,7 @@ static bool dcn32_resource_construct(
 	pool->base.res_cap = &res_cap_dcn32;
 	/* max number of pipes for ASIC before checking for pipe fuses */
 	num_pipes = pool->base.res_cap->num_timing_generator;
-	pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
+	pipe_fuses = read_pipe_fuses(ctx);

 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
 		if (pipe_fuses & 1 << i)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 63bd6928c82f..4c1e0f5a5f09 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -1633,6 +1633,14 @@ static struct resource_funcs dcn321_res_pool_funcs = {
 	.restore_mall_state = dcn32_restore_mall_state,
 };

+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+	/* DCN321 support max 4 pipes */
+	value = value & 0xf;
+	return value;
+}
+
 static bool dcn321_resource_construct(
 	uint8_t num_virtual_links,
@@ -1675,7 +1683,7 @@ static bool dcn321_resource_construct(
 	pool->base.res_cap = &res_cap_dcn321;
 	/* max number of pipes for ASIC before checking for pipe fuses */
 	num_pipes = pool->base.res_cap->num_timing_generator;
-	pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
+	pipe_fuses = read_pipe_fuses(ctx);

 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
 		if (pipe_fuses & 1 << i)

From 029c85adccb2e23352f622394288ecd133449332 Mon Sep 17 00:00:00 2001
From: Leo Ma
Date: Fri, 21 Apr 2023 09:58:25 -0400
Subject: [PATCH 169/861] drm/amd/display: revert "Update scaler recout data
 for visual confirm"

This reverts commit 1068e987ad0be83a109147fe7fa0891700e8d80e.

A regression was found with this change, so revert it for the time being
and resubmit once the issue is fixed.
Reviewed-by: Martin Leung Acked-by: Alan Liu Signed-off-by: Leo Ma Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/core/dc_resource.c | 17 ----------------- .../drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 18 +++++++++++++++++- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 193e09b05f5a..7e1e5532f88f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -69,9 +69,6 @@ #include "../dcn32/dcn32_resource.h" #include "../dcn321/dcn321_resource.h" -#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 -#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 -#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 #define DC_LOGGER_INIT(logger) @@ -811,8 +808,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx) struct rect surf_clip = plane_state->clip_rect; bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; int split_count, split_idx; - struct dpp *dpp = pipe_ctx->plane_res.dpp; - unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE) @@ -881,18 +876,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx) data->recout.width = data->h_active - data->recout.x; } } - - /* Check bounds to ensure the VC bar height was set to a sane value */ - if (dpp != NULL) { - if ((dpp->ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && - (dpp->ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { - visual_confirm_rect_height = dpp->ctx->dc->debug.visual_confirm_rect_height; - } - - if (dpp->ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) - data->recout.height = data->recout.height - - 2 * (dpp->inst + visual_confirm_rect_height); - } } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 7e140c35a0ce..b33955928bd0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,6 +39,9 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 +#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 +#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 +#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 #define REG(reg)\ dpp->tf_regs->reg @@ -588,6 +591,18 @@ static void dpp1_dscl_set_manual_ratio_init( static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { + int visual_confirm_on = 0; + unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; + + if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) + visual_confirm_on = 1; + + /* Check bounds to ensure the VC bar height was set to a sane value */ + if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && + (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { + visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; + } + REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -598,7 +613,8 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, /* Number of RECOUT horizontal pixels */ RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ - 
RECOUT_HEIGHT, recout->height); + RECOUT_HEIGHT, recout->height + - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height)); } /** From 124155c0bd4a4ed822c1ba246bdf5123b42c3ad9 Mon Sep 17 00:00:00 2001 From: George Shen Date: Thu, 13 Apr 2023 19:03:59 -0400 Subject: [PATCH 170/861] drm/amd/display: Add w/a to disable DP dual mode on certain ports [Why] Certain ports on DCN3.2 configs do not properly populate the BIOS info table flag to indicate DP dual mode is unsupported. [How] Add a workaround to disable DP dual mode on the ports with the missing BIOS info table flag. Reviewed-by: Michael Strauss Acked-by: Alan Liu Signed-off-by: George Shen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 1 + drivers/gpu/drm/amd/display/dc/link/link_detection.c | 4 ++++ 3 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index fea68383bb61..fe60816653d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -887,6 +887,7 @@ struct dc_debug_options { bool override_odm_optimization; bool minimize_dispclk_using_odm; bool disable_subvp_high_refresh; + bool disable_dp_plus_plus_wa; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index a49323885874..4de2f8813dce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -729,6 +729,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_fpo_vactive = false, .disable_boot_optimizations = false, .disable_subvp_high_refresh = true, + .disable_dp_plus_plus_wa = true, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index a131e30fd7d6..17904de4f155 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -593,6 +593,10 @@ static bool detect_dp(struct dc_link *link, /* DP SST branch */ link->type = dc_connection_sst_branch; } else { + if (link->dc->debug.disable_dp_plus_plus_wa && + link->link_enc->features.flags.bits.IS_UHBR20_CAPABLE) + return false; + /* DP passive dongles */ sink_caps->signal = dp_passive_dongle_detection(link->ddc, sink_caps, From 6d7d0a4bc39240a2dd84d122d4ed5487e6acddf7 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 22 Apr 2023 23:09:38 -0400 Subject: [PATCH 171/861] drm/amd/display: [FW Promotion] Release 0.0.165.0 - Add dmub boot options to disable ips states on init Acked-by: Alan Liu Signed-off-by: Anthony Koo Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 09d4d0d9b92c..af1f50742371 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -398,6 +398,12 @@ enum dmub_lvtma_status_bit { DMUB_LVTMA_STATUS_BIT_EDP_ON = (1 << 1), }; +enum dmub_ips_disable_type { + DMUB_IPS_DISABLE_IPS1 = 1, + DMUB_IPS_DISABLE_IPS2 = 2, + DMUB_IPS_DISABLE_IPS2_Z10 = 3, +}; + /** * union dmub_fw_boot_options - Boot option definitions for SCRATCH14 */ @@ -423,7 +429,9 @@ union 
dmub_fw_boot_options {
 		uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
 		uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
 		uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
-		uint32_t reserved : 13; /**< reserved */
+		uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
+		uint32_t ips_disable: 2; /* options to disable ips support*/
+		uint32_t reserved : 10; /**< reserved */
 	} bits; /**< boot bits */
 	uint32_t all; /**< 32-bit access to bits */
 };

From 4ab9d5848c728e5339e382f678a5e580573b3a3a Mon Sep 17 00:00:00 2001
From: Aric Cyr
Date: Sun, 23 Apr 2023 22:12:05 -0400
Subject: [PATCH 172/861] drm/amd/display: 3.2.234

This version brings along the following fixes:
- FW Release 0.0.165.0
- Add w/a to disable DP dual mode on certain ports
- Revert "Update scaler recout data for visual confirm"
- Filter out invalid bits in pipe_fuses
- Adding debug option to override Z8 watermark values
- Change default Z8 watermark values
- Workaround wrong HDR colorimetry with some receivers

Acked-by: Alan Liu
Signed-off-by: Aric Cyr
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index fe60816653d0..7e3f20a3a02f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;

-#define DC_VER "3.2.233"
+#define DC_VER "3.2.234"

 #define MAX_SURFACES 3
 #define MAX_PLANES 6

From 1156e1a60f024ce29fbb88dd330c2be81c4303ea Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 24 Apr 2023 14:24:20 -0400
Subject: [PATCH 173/861] drm/amdgpu: add [en/dis]able_kgq() functions

To replace the IP specific variants which are largely duplicated.
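For orientation, the intended call pattern for the new helpers, condensed
from the gfx10/gfx11 follow-up patches later in this series (error handling
abbreviated, so treat this as a sketch rather than a drop-in):

	/* hw_init path: map the gfx rings through the KIQ */
	r = amdgpu_gfx_enable_kgq(adev, 0);
	if (r)
		return r;

	/* hw_fini path: unmap them again; failures are only logged */
	if (amdgpu_gfx_disable_kgq(adev, 0))
		DRM_ERROR("KGQ disable failed\n");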
Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 68 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 + 2 files changed, 70 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 66b9740ec376..90f5d302d5f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -519,6 +519,39 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) return r; } +int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + int i, r = 0; + int j; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock(&kiq->ring_lock); + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * + adev->gfx.num_gfx_rings)) { + spin_unlock(&kiq->ring_lock); + return -ENOMEM; + } + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + j = i + xcc_id * adev->gfx.num_gfx_rings; + kiq->pmf->kiq_unmap_queues(kiq_ring, + &adev->gfx.gfx_ring[i], + PREEMPT_QUEUES, 0, 0); + } + } + + if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) + r = amdgpu_ring_test_helper(kiq_ring); + spin_unlock(&kiq->ring_lock); + + return r; +} + int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int queue_bit) { @@ -590,6 +623,41 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) return r; } +int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + int r, i, j; + + if (!kiq->pmf || !kiq->pmf->kiq_map_queues) + return -EINVAL; + + spin_lock(&kiq->ring_lock); + /* No need to map kcq on the slave */ + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * + adev->gfx.num_gfx_rings); + if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + spin_unlock(&adev->gfx.kiq[0].ring_lock); + return r; + } + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + j = i + xcc_id * adev->gfx.num_gfx_rings; + kiq->pmf->kiq_map_queues(kiq_ring, + &adev->gfx.gfx_ring[i]); + } + } + + r = amdgpu_ring_test_helper(kiq_ring); + spin_unlock(&kiq->ring_lock); + if (r) + DRM_ERROR("KCQ enable failed\n"); + + return r; +} + /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable * * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index f1a2ce7b6aca..2755f00ac19a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -433,6 +433,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id); void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev); void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev); From f39c25357f0c9ada9ed9676eaa7681d4583dda63 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 24 Apr 2023 14:31:00 -0400 Subject: [PATCH 174/861] drm/amdgpu/gfx10: use generic [en/dis]able_kgq() helpers And remove the duplicate 
local variants. Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 48 ++------------------------ 1 file changed, 2 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 25be4485dcd5..24d7134228b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6462,28 +6462,6 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) return 0; } -static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev) -{ - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; - int r, i; - - if (!kiq->pmf || !kiq->pmf->kiq_map_queues) - return -EINVAL; - - r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * - adev->gfx.num_gfx_rings); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - return r; - } - - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); - - return amdgpu_ring_test_helper(kiq_ring); -} - static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) { int r, i; @@ -6507,7 +6485,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) goto done; } - r = gfx_v10_0_kiq_enable_kgq(adev); + r = amdgpu_gfx_enable_kgq(adev, 0); if (r) goto done; @@ -7178,28 +7156,6 @@ static int gfx_v10_0_hw_init(void *handle) return r; } -static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev) -{ - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - int i; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * - adev->gfx.num_gfx_rings)) - return -ENOMEM; - - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], - PREEMPT_QUEUES, 0, 0); - if (!adev->job_hang) - return amdgpu_ring_test_helper(kiq_ring); - else - return 0; -} - static int gfx_v10_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -7210,7 +7166,7 @@ static int gfx_v10_0_hw_fini(void *handle) if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { - r = gfx_v10_0_kiq_disable_kgq(adev); + r = amdgpu_gfx_disable_kgq(adev, 0); if (r) DRM_ERROR("KGQ disable failed\n"); } From 83033f72a40b5035c78f847fa4cc55fba633b82a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 24 Apr 2023 14:33:15 -0400 Subject: [PATCH 175/861] drm/amdgpu/gfx11: use generic [en/dis]able_kgq() helpers And remove the duplicate local variants. 
Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 49 ++------------------------ 1 file changed, 2 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 646003b2faf8..d36d365cb582 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -3710,28 +3710,6 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) return 0; } -static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) -{ - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; - int r, i; - - if (!kiq->pmf || !kiq->pmf->kiq_map_queues) - return -EINVAL; - - r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * - adev->gfx.num_gfx_rings); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - return r; - } - - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); - - return amdgpu_ring_test_helper(kiq_ring); -} - static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) { int r, i; @@ -3755,7 +3733,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) goto done; } - r = gfx_v11_0_kiq_enable_kgq(adev); + r = amdgpu_gfx_enable_kgq(adev, 0); if (r) goto done; @@ -4392,29 +4370,6 @@ static int gfx_v11_0_hw_init(void *handle) return r; } -static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) -{ - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - int i, r = 0; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * - adev->gfx.num_gfx_rings)) - return -ENOMEM; - - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], - PREEMPT_QUEUES, 0, 0); - - if (adev->gfx.kiq[0].ring.sched.ready) - r = amdgpu_ring_test_helper(kiq_ring); - - return r; -} - static int gfx_v11_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -4426,7 +4381,7 @@ static int gfx_v11_0_hw_fini(void *handle) if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { - r = gfx_v11_0_kiq_disable_kgq(adev); + r = amdgpu_gfx_disable_kgq(adev, 0); if (r) DRM_ERROR("KGQ disable failed\n"); } From 5a0510d58b635ed5bc2555ba1255cd2b34e033ee Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 28 Apr 2023 16:42:03 -0500 Subject: [PATCH 176/861] drm/amd: Downgrade message about watermarks table after s0i3 to debug This message shows up on s0i3 resume for DCN31 and DCN314 platforms but it has been decided that this flow won't be changed and the message is expected behavior. Downgrade the message to debug. 
Signed-off-by: Mario Limonciello Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 2 +- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 0827c7df2855..32279c5db724 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -130,7 +130,7 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, if (result == VBIOSSMC_Result_Failed) { if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && param == TABLE_WATERMARKS) - DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + DC_LOG_DEBUG("Watermarks table not configured properly by SMU"); else ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index 0765334f0825..07baa10a8647 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -145,7 +145,7 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, if (result == VBIOSSMC_Result_Failed) { if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && param == TABLE_WATERMARKS) - DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + DC_LOG_DEBUG("Watermarks table not configured properly by SMU"); else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq || msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk) DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS"); From d78e816a3d615073a48bf8cc438790bc08160e0e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Apr 2023 14:42:09 -0400 Subject: [PATCH 177/861] drm/amdgpu/gfx10: drop unused variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just check the return value directly. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 24d7134228b0..5c67c91c4297 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7159,15 +7159,13 @@ static int gfx_v10_0_hw_init(void *handle) static int gfx_v10_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int r; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { - r = amdgpu_gfx_disable_kgq(adev, 0); - if (r) + if (amdgpu_gfx_disable_kgq(adev, 0)) DRM_ERROR("KGQ disable failed\n"); } From b848fe65f856193fc4f567c84f559f60ff66a3db Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Apr 2023 14:42:53 -0400 Subject: [PATCH 178/861] drm/amdgpu/gfx11: drop unused variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just check the return value directly. 
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d36d365cb582..256014a8c824 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -4373,7 +4373,6 @@ static int gfx_v11_0_hw_init(void *handle) static int gfx_v11_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int r; amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); @@ -4381,8 +4380,7 @@ static int gfx_v11_0_hw_fini(void *handle) if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { - r = amdgpu_gfx_disable_kgq(adev, 0); - if (r) + if (amdgpu_gfx_disable_kgq(adev, 0)) DRM_ERROR("KGQ disable failed\n"); } From 42cdf6f687daa1a1992db400e8311e1e49c787fb Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Apr 2023 14:44:43 -0400 Subject: [PATCH 179/861] drm/amdgpu/gfx8: always restore kcq MQDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always restore the MQD not just when we do a reset. This allows us to move the MQD to VRAM if we want. v2: always reset ring pointer as well (Christian) Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5de44d7e92de..2ae7f167985f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4651,15 +4651,13 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); - } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ - /* reset MQD to a clean status */ + } else { + /* restore MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; amdgpu_ring_clear_ring(ring); - } else { - amdgpu_ring_clear_ring(ring); } return 0; } From 45b54a7dd3437632352ed28163e982233ef190a8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Apr 2023 14:49:00 -0400 Subject: [PATCH 180/861] drm/amdgpu/gfx9: always restore kcq MQDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always restore the MQD not just when we do a reset. This allows us to move the MQD to VRAM if we want. 
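Condensed, the init-versus-restore pattern that this run of MQD patches
converges on looks like the sketch below; "backup" stands in for the
per-queue adev->gfx.*.mqd_backup[mqd_idx] pointer and the surrounding
locking is omitted:

	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		/* first init: program the MQD, then stash a clean backup */
		memcpy(backup, mqd, sizeof(*mqd));
	} else {
		/* reset *or* resume: restore from the clean backup... */
		memcpy(mqd, backup, sizeof(*mqd));
		/* ...and reset the ring so rptr/wptr start from a known state */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
	}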
v2: always reset ring pointer as well (Christian) Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 ++----- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 8fb027cf1bfb..4a7556099469 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3589,17 +3589,14 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); - } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ - /* reset MQD to a clean status */ + } else { + /* restore MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); - /* reset ring buffer */ ring->wptr = 0; atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); - } else { - amdgpu_ring_clear_ring(ring); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 312491455382..b5924543e363 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1799,17 +1799,14 @@ static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); - } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ - /* reset MQD to a clean status */ + } else { + /* restore MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); - /* reset ring buffer */ ring->wptr = 0; atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); - } else { - amdgpu_ring_clear_ring(ring); } return 0; From 2dbaf83998fe4772c83e5060cfbc21808758bb4b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Apr 2023 14:51:34 -0400 Subject: [PATCH 181/861] drm/amdgpu/gfx10: always restore kcq/kgq MQDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always restore the MQD not just when we do a reset. This allows us to move the MQD to VRAM if we want. 
v2: always reset ring pointer as well (Christian) Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 5c67c91c4297..2e0234b43f43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6447,16 +6447,14 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.me.mqd_backup[mqd_idx]) memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); - } else if (amdgpu_in_reset(adev)) { - /* reset mqd with the backup copy */ + } else { + /* restore mqd with the backup copy */ if (adev->gfx.me.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; *ring->wptr_cpu_addr = 0; amdgpu_ring_clear_ring(ring); - } else { - amdgpu_ring_clear_ring(ring); } return 0; @@ -6780,17 +6778,14 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); - } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ - /* reset MQD to a clean status */ + } else { + /* restore MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); - /* reset ring buffer */ ring->wptr = 0; atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); - } else { - amdgpu_ring_clear_ring(ring); } return 0; From dae343b343ff741d727312b2a9b03d86e64b31c5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 1 May 2023 16:31:53 +0200 Subject: [PATCH 182/861] drm/amd/display: mark amdgpu_dm_connector_funcs_force static A global function without a header prototype has made it into linux-next during the merge window: drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:6339:6: error: no previous prototype for 'amdgpu_dm_connector_funcs_force' [-Werror=missing-prototypes] Mark the function static instead, as there are no other callers outside this file. 
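For context, the warning class being silenced, reproduced in miniature
(a hypothetical file, not driver code):

	/* foo.c, built with -Wmissing-prototypes */
	void helper(void) {}          /* warning: no previous prototype for 'helper' */
	static void helper2(void) {}  /* fine: internal linkage needs no header prototype */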
Fixes: 0ba4a784a145 ("drm/amd/display: implement force function in amdgpu_dm_connector_funcs") Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202304251640.JClqTim9-lkp@intel.com/ Signed-off-by: Arnd Bergmann Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dd56e1512b0a..e278474e4a5b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6340,7 +6340,7 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) return 0; } -void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) +static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *dc_link = aconnector->dc_link; From c6fa6fe9ebd5cdde0e41a558ff3efea8fe988e77 Mon Sep 17 00:00:00 2001 From: Thong Thai Date: Mon, 1 May 2023 11:04:36 -0400 Subject: [PATCH 183/861] drm/amdgpu/nv: update VCN 3 max HEVC encoding resolution Update the maximum resolution reported for HEVC encoding on VCN 3 devices to reflect its 8K encoding capability. v2: Also update the max height for H.264 encoding to match spec. (Ruijing) Signed-off-by: Thong Thai Reviewed-by: Ruijing Dong Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 3cc068974bcd..a5f76c9538c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -98,6 +98,16 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = }; /* Sienna Cichlid */ +static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs sc_video_codecs_encode = { + .codec_count = ARRAY_SIZE(sc_video_codecs_encode_array), + .codec_array = sc_video_codecs_encode_array, +}; + static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, @@ -136,8 +146,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = /* SRIOV Sienna Cichlid, not const since data is controlled by host */ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, }; static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = @@ -237,12 +247,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, } else { if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &sc_video_codecs_decode_vcn1; } else { if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = 
&sc_video_codecs_encode;
 			else
 				*codecs = &sc_video_codecs_decode_vcn0;
 		}
@@ -251,14 +261,14 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
 	case IP_VERSION(3, 0, 16):
 	case IP_VERSION(3, 0, 2):
 		if (encode)
-			*codecs = &nv_video_codecs_encode;
+			*codecs = &sc_video_codecs_encode;
 		else
 			*codecs = &sc_video_codecs_decode_vcn0;
 		return 0;
 	case IP_VERSION(3, 1, 1):
 	case IP_VERSION(3, 1, 2):
 		if (encode)
-			*codecs = &nv_video_codecs_encode;
+			*codecs = &sc_video_codecs_encode;
 		else
 			*codecs = &yc_video_codecs_decode;
 		return 0;

From 09d8a67912462a07cb7ff40956ed8c79b1c74564 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 26 Apr 2023 14:54:27 -0400
Subject: [PATCH 184/861] drm/amdgpu/gfx11: always restore kcq/kgq MQDs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Always restore the MQD not just when we do a reset. This allows us to
move the MQD to VRAM if we want.

v2: always reset ring pointer as well (Christian)

Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 256014a8c824..f09e2558e73b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -3695,16 +3695,14 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
 		mutex_unlock(&adev->srbm_mutex);
 		if (adev->gfx.me.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-	} else if (amdgpu_in_reset(adev)) {
-		/* reset mqd with the backup copy */
+	} else {
+		/* restore mqd with the backup copy */
 		if (adev->gfx.me.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
 		/* reset the ring */
 		ring->wptr = 0;
 		*ring->wptr_cpu_addr = 0;
 		amdgpu_ring_clear_ring(ring);
-	} else {
-		amdgpu_ring_clear_ring(ring);
 	}

 	return 0;
@@ -4043,17 +4041,14 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
-		/* reset MQD to a clean status */
+	} else {
+		/* restore MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
-		/* reset ring buffer */
 		ring->wptr = 0;
 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
 		amdgpu_ring_clear_ring(ring);
-	} else {
-		amdgpu_ring_clear_ring(ring);
 	}

 	return 0;

From 0e768043bf68dc9713de34780363c595e54af7fa Mon Sep 17 00:00:00 2001
From: Yifan Zhang
Date: Thu, 27 Apr 2023 14:01:05 +0800
Subject: [PATCH 185/861] drm/amdgpu: set gfx9 onwards APU atomics support to
 be true

APUs w/ gfx9 onwards don't rely on PCIe atomics; rather, the internal
path natively supports atomics. Set have_atomics_support to true.
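For background, the non-APU branch this patch leaves in place asks the PCI
core whether 32- and 64-bit AtomicOp completions can be routed to the root
port; only a sketch is shown here, using the same capability flags the
SR-IOV branch checks:

	/* dGPU: atomics have to traverse the PCIe path to be usable */
	adev->have_atomics_support =
		!pci_enable_atomic_ops_to_root(adev->pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);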
Signed-off-by: Yifan Zhang Reviewed-by: Lang Yu Acked-by: Felix Kuehling Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1787602fe582..ae0bcffa2591 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3758,6 +3758,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); + /* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a + * internal path natively support atomics, set have_atomics_support to true. + */ + else if ((adev->flags & AMD_IS_APU) && + (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) + adev->have_atomics_support = true; else adev->have_atomics_support = !pci_enable_atomic_ops_to_root(adev->pdev, From 9e690184586bfb88efa176cdf912414f6c53519c Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 3 May 2023 14:10:20 +0530 Subject: [PATCH 186/861] drm/amd/amdgpu: Fix errors & warnings in amdgpu _bios, _cs, _dma_buf, _fence.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The following checkpatch errors & warning is removed. ERROR: else should follow close brace '}' ERROR: trailing statements should be on next line WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: Possible repeated word: 'Fences' WARNING: Missing a blank line after declarations WARNING: braces {} are not necessary for single statement blocks WARNING: Comparisons should place the constant on the right side of the test WARNING: printk() should include KERN_ facility level Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 16 +++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 ++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 14 ++++++++------ 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 30c28a69e847..b582b83c4984 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -104,9 +104,8 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) adev->bios = NULL; vram_base = pci_resource_start(adev->pdev, 0); bios = ioremap_wc(vram_base, size); - if (!bios) { + if (!bios) return false; - } adev->bios = kmalloc(size, GFP_KERNEL); if (!adev->bios) { @@ -133,9 +132,8 @@ bool amdgpu_read_bios(struct amdgpu_device *adev) adev->bios = NULL; /* XXX: some cards may return 0 for rom size? 
ddx has a workaround */ bios = pci_map_rom(adev->pdev, &size); - if (!bios) { + if (!bios) return false; - } adev->bios = kzalloc(size, GFP_KERNEL); if (adev->bios == NULL) { @@ -168,9 +166,9 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) header[AMD_VBIOS_SIGNATURE_END] = 0; if ((!AMD_IS_VALID_VBIOS(header)) || - 0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET], - AMD_VBIOS_SIGNATURE, - strlen(AMD_VBIOS_SIGNATURE))) + memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET], + AMD_VBIOS_SIGNATURE, + strlen(AMD_VBIOS_SIGNATURE)) != 0) return false; /* valid vbios, go on */ @@ -264,7 +262,7 @@ static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios, status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); if (ACPI_FAILURE(status)) { - printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); + DRM_ERROR("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); return -ENODEV; } @@ -363,7 +361,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) struct acpi_table_header *hdr; acpi_size tbl_size; UEFI_ACPI_VFCT *vfct; - unsigned offset; + unsigned int offset; if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index c3b3287dc29e..5270089aee16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -397,7 +397,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p, { struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata; struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned num_deps; + unsigned int num_deps; int i, r; num_deps = chunk->length_dw * 4 / @@ -468,7 +468,7 @@ static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p, struct amdgpu_cs_chunk *chunk) { struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; - unsigned num_deps; + unsigned int num_deps; int i, r; num_deps = chunk->length_dw * 4 / @@ -486,7 +486,7 @@ static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p, struct amdgpu_cs_chunk *chunk) { struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata; - unsigned num_deps; + unsigned int num_deps; int i, r; num_deps = chunk->length_dw * 4 / @@ -506,7 +506,7 @@ static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p, struct amdgpu_cs_chunk *chunk) { struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata; - unsigned num_deps; + unsigned int num_deps; int i; num_deps = chunk->length_dw * 4 / @@ -540,7 +540,7 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p, struct amdgpu_cs_chunk *chunk) { struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata; - unsigned num_deps; + unsigned int num_deps; int i; num_deps = chunk->length_dw * 4 / @@ -758,6 +758,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, if (used_vis_vram < total_vis_vram) { u64 free_vis_vram = total_vis_vram - used_vis_vram; + adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis + increment_us, us_upper_bound); @@ -1076,9 +1077,8 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p, /* the IB should be reserved at this point */ r = amdgpu_bo_kmap(aobj, (void **)&kptr); - if (r) { + if (r) return r; - } kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE); @@ -1392,7 +1392,7 @@ error_cleanup: /* Cleanup the parser structure */ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser) { - unsigned i; + unsigned int i; amdgpu_sync_free(&parser->sync); 
for (i = 0; i < parser->num_post_deps; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 0c001bb8fc2b..6dceaf40625b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -149,7 +149,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, if (!bo->tbo.pin_count) { /* move buffer into GTT or VRAM */ struct ttm_operation_ctx ctx = { false, false }; - unsigned domains = AMDGPU_GEM_DOMAIN_GTT; + unsigned int domains = AMDGPU_GEM_DOMAIN_GTT; if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && attach->peer2peer) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index f52d0ba91a77..1994eafd3e71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -42,7 +42,6 @@ #include "amdgpu_reset.h" /* - * Fences * Fences mark an event in the GPUs pipeline and are used * for GPU/CPU synchronization. When the fence is written, * it is expected that all buffers associated with that fence @@ -140,7 +139,7 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * Returns 0 on success, -ENOMEM on failure. */ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job, - unsigned flags) + unsigned int flags) { struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; @@ -174,11 +173,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd adev->fence_context + ring->idx, seq); /* Against remove in amdgpu_job_{free, free_cb} */ dma_fence_get(fence); - } - else + } else { dma_fence_init(fence, &amdgpu_fence_ops, &ring->fence_drv.lock, adev->fence_context + ring->idx, seq); + } } amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, @@ -396,7 +395,7 @@ signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, * Returns the number of emitted fences on the ring. Used by the * dynpm code to ring track activity. 
*/ -unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) +unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring) { uint64_t emitted; @@ -475,7 +474,7 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, */ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq_src, - unsigned irq_type) + unsigned int irq_type) { struct amdgpu_device *adev = ring->adev; uint64_t index; @@ -653,6 +652,7 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) continue; @@ -840,6 +840,7 @@ static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) continue; @@ -913,6 +914,7 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work) reset_work); struct amdgpu_reset_context reset_context; + memset(&reset_context, 0, sizeof(reset_context)); reset_context.method = AMD_RESET_METHOD_NONE; From 967a66396e4668ef314e06a0391e67aa1cf6d058 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Wed, 26 Apr 2023 09:46:54 +0800 Subject: [PATCH 187/861] drm/amdgpu: drop redundant sched job cleanup when cs is aborted MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Once command submission failed due to userptr invalidation in amdgpu_cs_submit, legacy code will perform cleanup of scheduler job. However, it's not needed at all, as former commit has integrated job cleanup stuff into amdgpu_job_free. Otherwise, because of double free, a NULL pointer dereference will occur in such scenario. Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2457 Fixes: f7d66fb2ea43 ("drm/amdgpu: cleanup scheduler job initialization v2") Signed-off-by: Guchun Chen Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5270089aee16..6e1d331af01f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1305,7 +1305,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, r = drm_sched_job_add_dependency(&leader->base, fence); if (r) { dma_fence_put(fence); - goto error_cleanup; + return r; } } @@ -1332,7 +1332,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, } if (r) { r = -EAGAIN; - goto error_unlock; + mutex_unlock(&p->adev->notifier_lock); + return r; } p->fence = dma_fence_get(&leader->base.s_fence->finished); @@ -1379,14 +1380,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, mutex_unlock(&p->adev->notifier_lock); mutex_unlock(&p->bo_list->bo_list_mutex); return 0; - -error_unlock: - mutex_unlock(&p->adev->notifier_lock); - -error_cleanup: - for (i = 0; i < p->gang_size; ++i) - drm_sched_job_cleanup(&p->jobs[i]->base); - return r; } /* Cleanup the parser structure */ From 1cfb4d6121276a829aa94d0e32a7f5e1830ebc21 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 26 Apr 2023 15:30:13 -0400 Subject: [PATCH 188/861] drm/amdgpu: put MQDs in VRAM Reduces preemption latency. Only enable this for gfx10 and 11 for now to avoid changing behavior on gfx 8 and 9. 
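(Background for readers outside amdgpu: the MQD, or memory queue descriptor,
is the per-queue state block that the CP/MES firmware reads and writes when
mapping, unmapping, or preempting a queue, which is why its placement shows
up in preemption latency.) The shape of the change is small; roughly, with
names as in the diff below:

	u32 domain = AMDGPU_GEM_DOMAIN_GTT;

	/* prefer VRAM for MQDs on gfx10+; older chips keep the GTT behavior */
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
		domain |= AMDGPU_GEM_DOMAIN_VRAM;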
v2: move MES MQDs into VRAM as well (YuBiao)
v3: enable on gfx10, 11 only (Alex)
v4: minor style changes, document why gfx10/11 only (Alex)

Reviewed-by: Luben Tuikov
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9 +++++++--
 drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 1 +
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 1 +
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 90f5d302d5f3..b91be56ba773 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -382,6 +382,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 	int r, i, j;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
 	struct amdgpu_ring *ring = &kiq->ring;
+	u32 domain = AMDGPU_GEM_DOMAIN_GTT;
+
+	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
+	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
+		domain |= AMDGPU_GEM_DOMAIN_VRAM;

 	/* create MQD for KIQ */
 	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
@@ -413,7 +418,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 		ring = &adev->gfx.gfx_ring[i];
 		if (!ring->mqd_obj) {
 			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
-						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+						    domain, &ring->mqd_obj,
 						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
 			if (r) {
 				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
@@ -435,7 +440,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 		ring = &adev->gfx.compute_ring[j];
 		if (!ring->mqd_obj) {
 			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
-						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+						    domain, &ring->mqd_obj,
 						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
 			if (r) {
 				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 0599f8a6813e..4560476c7c31 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -901,6 +901,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
 		return 0;

 	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_VRAM |
 				    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
 				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index e853bcb892fc..3adb450eec07 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -999,6 +999,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
 		return 0;

 	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_VRAM |
 				    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
 				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
 	if (r) {

From 29551fd90ee69771f499071c1119c1007febbd1d Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Thu, 27 Apr 2023 14:09:33 -0400
Subject: [PATCH 189/861] drm/amdgpu: drop invalid IP revision
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This was already fixed and dropped in:
commit baf3f8f37406 ("drm/amdgpu: handle SRIOV VCN revision parsing")
commit c40bdfb2ffa4 ("drm/amdgpu: fix incorrect VCN revision in SRIOV")

But it seems to have been accidentally left around in a merge.
Reviewed-by: Luben Tuikov Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 0ba013275dc1..76ceca05452e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1939,7 +1939,6 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(3, 1, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 0, 2): - case IP_VERSION(3, 0, 192): amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); From 0e5f625157ca47d9ee27a36310611964bd97c605 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 27 Apr 2023 14:32:11 -0400 Subject: [PATCH 190/861] drm/amdgpu: drop unused function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit amdgpu_discovery_get_ip_version() has not been used since commit c40bdfb2ffa4 ("drm/amdgpu: fix incorrect VCN revision in SRIOV") so drop it. Reviewed-by: Luben Tuikov Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 48 ------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 2 - 2 files changed, 50 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 76ceca05452e..b58d94dc1924 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1208,54 +1208,6 @@ next_ip: return 0; } -int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, - int *major, int *minor, int *revision) -{ - struct binary_header *bhdr; - struct ip_discovery_header *ihdr; - struct die_header *dhdr; - struct ip *ip; - uint16_t die_offset; - uint16_t ip_offset; - uint16_t num_dies; - uint16_t num_ips; - int i, j; - - if (!adev->mman.discovery_bin) { - DRM_ERROR("ip discovery uninitialized\n"); - return -EINVAL; - } - - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); - num_dies = le16_to_cpu(ihdr->num_dies); - - for (i = 0; i < num_dies; i++) { - die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); - num_ips = le16_to_cpu(dhdr->num_ips); - ip_offset = die_offset + sizeof(*dhdr); - - for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); - - if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) { - if (major) - *major = ip->major; - if (minor) - *minor = ip->minor; - if (revision) - *revision = ip->revision; - return 0; - } - ip_offset += struct_size(ip, base_address, ip->num_base_address); - } - } - - return -EINVAL; -} - static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) { int vcn_harvest_count = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 8563dd4a7dc2..63ec6924b907 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -28,8 +28,6 @@ #define DISCOVERY_TMR_OFFSET (64 << 10) void amdgpu_discovery_fini(struct amdgpu_device *adev); -int 
amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, - int *major, int *minor, int *revision); int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); #endif /* __AMDGPU_DISCOVERY__ */ From 7a6a2e59aacd6d5e75927352b72d39a5bc447e09 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 3 May 2023 18:09:18 +0300 Subject: [PATCH 191/861] drm/amdgpu: unlock the correct lock in amdgpu_gfx_enable_kcq() We changed which lock we are supposed to take, but this error path was accidentally overlooked, so it still drops the old lock. Fixes: def799c6596d ("drm/amdgpu: add multi-xcc support to amdgpu_gfx interfaces (v4)") Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index b91be56ba773..76dd2841cc0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -605,7 +605,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) kiq->pmf->set_resources_size); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); - spin_unlock(&adev->gfx.kiq[0].ring_lock); + spin_unlock(&kiq->ring_lock); return r; } From f4409a2361152f3480781a1dea1a3bd0d8369c78 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 3 May 2023 18:09:35 +0300 Subject: [PATCH 192/861] drm/amdgpu: unlock on error in gfx_v9_4_3_kiq_resume() Smatch complains that we need to drop this lock before returning. drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:1838 gfx_v9_4_3_kiq_resume() warn: inconsistent returns 'ring->mqd_obj->tbo.base.resv'. Fixes: 86301129698b ("drm/amdgpu: split gc v9_4_3 functionality from gc v9_0") Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index b5924543e363..c9ae3065a13d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1824,8 +1824,10 @@ static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev, int xcc_id) return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(ring->mqd_obj); return r; + } gfx_v9_4_3_kiq_init_queue(ring, xcc_id); amdgpu_bo_kunmap(ring->mqd_obj); From c0c27428903700d86920394aa2302506b5d95b17 Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Tue, 2 May 2023 11:59:08 -0400 Subject: [PATCH 193/861] drm/amdgpu: fix an amdgpu_irq_put() issue in gmc_v9_0_hw_fini() As mentioned in commit c56edea58c31 ("drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v10_0_hw_fini") and commit aa6ac247ed7d ("drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v11_0_hw_fini"), it is meaningless to call amdgpu_irq_put() for gmc.ecc_irq. So, remove it from gmc_v9_0_hw_fini(). 
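To make the invariant concrete, here is a minimal sketch (illustrative only, not part of the patch; the init/fini function names are hypothetical): amdgpu IRQ sources are enable-counted, so an amdgpu_irq_put() should pair with an earlier amdgpu_irq_get() on the same path, which gmc.ecc_irq does not have in hw_init.

/* Illustrative sketch only -- function names are hypothetical. */
static int example_hw_init(struct amdgpu_device *adev)
{
	/* Only sources enabled here may be disabled again in hw_fini. */
	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static int example_hw_fini(struct amdgpu_device *adev)
{
	/* Balanced put for vm_fault; no put for gmc.ecc_irq, which was
	 * never enabled on this path, so a put would unbalance the
	 * enable count and produce the call trace.
	 */
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	return 0;
}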
Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2522 Fixes: c8b5a95b5709 ("drm/amdgpu: Fix desktop freezed after gpu-reset") Reviewed-by: Mario Limonciello Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 290804a06e05..6ae5cee9b64b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1999,7 +1999,6 @@ static int gmc_v9_0_hw_fini(void *handle) if (adev->mmhub.funcs->update_power_gating) adev->mmhub.funcs->update_power_gating(adev, false); - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); return 0; From 4667fbe2f7c54628c48aedc8c8472cd12a5f7fcd Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 24 Nov 2021 17:24:58 +0800 Subject: [PATCH 194/861] drm/amdgpu: do gfxhub init for all XCDs Each XCD needs to do gfxhub init Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 545 +++++++++++++---------- 1 file changed, 299 insertions(+), 246 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index c59c6c85fbff..79af32bb078c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -43,19 +43,25 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint64_t page_table_base) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + int i; - WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - hub->ctx_addr_distance * vmid, - lower_32_bits(page_table_base)); + for (i = 0; i < adev->gfx.num_xcd; i++) { + WREG32_SOC15_OFFSET(GC, i, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); - WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - hub->ctx_addr_distance * vmid, - upper_32_bits(page_table_base)); + WREG32_SOC15_OFFSET(GC, i, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); + } } static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) { uint64_t pt_base; + int i; if (adev->gmc.pdb0_bo) pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); @@ -67,26 +73,36 @@ static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) /* If use GART for FB translation, vmid0 page table covers both * vram and system memory (gart) */ - if (adev->gmc.pdb0_bo) { - WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.fb_start >> 12)); - WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.fb_start >> 44)); + for (i = 0; i < adev->gfx.num_xcd; i++) { + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(GC, i, + regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.fb_start >> 12)); + WREG32_SOC15(GC, i, + regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.fb_start >> 44)); - WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->gmc.gart_end >> 44)); - } else { - WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.gart_start >> 12)); - WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.gart_start >> 44)); + WREG32_SOC15(GC, i, + 
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(GC, i, + regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } else { + WREG32_SOC15(GC, i, + regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(GC, i, + regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); - WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->gmc.gart_end >> 44)); + WREG32_SOC15(GC, i, + regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(GC, i, + regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } } } @@ -94,160 +110,183 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; uint32_t tmp; + int i; - /* Program the AGP BAR */ - WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BASE, 0); - WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); - WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + for (i = 0; i < adev->gfx.num_xcd; i++) { + /* Program the AGP BAR */ + WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BASE, 0); + WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); - if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { - /* Program the system aperture low logical page number. */ - WREG32_SOC15_RLC(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, - min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { + /* Program the system aperture low logical page number. */ + WREG32_SOC15_RLC(GC, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - if (adev->apu_flags & AMD_APU_IS_RAVEN2) - /* - * Raven2 has a HW issue that it is unable to use the - * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. - * So here is the workaround that increase system - * aperture high address (add 1) to get rid of the VM - * fault and hardware hang. - */ - WREG32_SOC15_RLC(GC, 0, - regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max((adev->gmc.fb_end >> 18) + 0x1, - adev->gmc.agp_end >> 18)); - else - WREG32_SOC15_RLC(GC, 0, - regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + /* + * Raven2 has a HW issue that it is unable to use the + * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. + * So here is the workaround that increase system + * aperture high address (add 1) to get rid of the VM + * fault and hardware hang. + */ + WREG32_SOC15_RLC(GC, i, + regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max((adev->gmc.fb_end >> 18) + 0x1, + adev->gmc.agp_end >> 18)); + else + WREG32_SOC15_RLC(GC, i, + regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); - /* Set default page address. */ - value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); - WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, - (u32)(value >> 12)); - WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, - (u32)(value >> 44)); + /* Set default page address. 
*/ + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); + WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); - /* Program "protection fault". */ - WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, - (u32)(adev->dummy_page_addr >> 12)); - WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, - (u32)((u64)adev->dummy_page_addr >> 44)); + /* Program "protection fault". */ + WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); - tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, - ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); - WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); - } + tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); + } - /* In the case squeezing vram into GART aperture, we don't use - * FB aperture and AGP aperture. Disable them. - */ - if (adev->gmc.pdb0_bo) { - WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_TOP, 0); - WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); - WREG32_SOC15(GC, 0, regMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, 0, regMC_VM_AGP_BOT, 0xFFFFFF); - WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); - WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + /* In the case squeezing vram into GART aperture, we don't use + * FB aperture and AGP aperture. Disable them. + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(GC, i, regMC_VM_FB_LOCATION_TOP, 0); + WREG32_SOC15(GC, i, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); + WREG32_SOC15(GC, i, regMC_VM_AGP_TOP, 0); + WREG32_SOC15(GC, i, regMC_VM_AGP_BOT, 0xFFFFFF); + WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); + WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + } } } static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) { uint32_t tmp; + int i; - /* Setup TLB control */ - tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL); + for (i = 0; i < adev->gfx.num_xcd; i++) { + /* Setup TLB control */ + tmp = RREG32_SOC15(GC, i, regMC_VM_MX_L1_TLB_CNTL); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC);/* XXX for emulation. */ - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); - WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); + WREG32_SOC15_RLC(GC, i, regMC_VM_MX_L1_TLB_CNTL, tmp); + } } static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; + int i; - /* Setup L2 cache */ - tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); - /* XXX for emulation, Refer to closed source code.*/ - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, - 0); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); - WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL, tmp); + for (i = 0; i < adev->gfx.num_xcd; i++) { + /* Setup L2 cache */ + tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, + 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL, tmp); - tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL2); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); - WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL2, tmp); + tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL2, tmp); - tmp = regVM_L2_CNTL3_DEFAULT; - if (adev->gmc.translate_further) { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, - L2_CACHE_BIGK_FRAGMENT_SIZE, 9); - } else { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, - L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + tmp = regVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL3, tmp); + + tmp = regVM_L2_CNTL4_DEFAULT; + if (adev->gmc.xgmi.connected_to_cpu) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + } + WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL4, tmp); } - WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL3, tmp); - - tmp = regVM_L2_CNTL4_DEFAULT; - if (adev->gmc.xgmi.connected_to_cpu) { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1); - } else { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 
VMC_TAP_PTE_REQUEST_PHYSICAL, 0); - } - WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL4, tmp); } static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) { uint32_t tmp; + int i; - tmp = RREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, - adev->gmc.vmid0_page_table_depth); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, - adev->gmc.vmid0_page_table_block_size); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); - WREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL, tmp); + for (i = 0; i < adev->gfx.num_xcd; i++) { + tmp = RREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, + adev->gmc.vmid0_page_table_depth); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, + adev->gmc.vmid0_page_table_block_size); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL, tmp); + } } static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) { - WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, - 0XFFFFFFFF); - WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, - 0x0000000F); + int i; - WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, - 0); - WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, - 0); + for (i = 0; i < adev->gfx.num_xcd; i++) { + WREG32_SOC15(GC, i, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + 0XFFFFFFFF); + WREG32_SOC15(GC, i, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + 0x0000000F); - WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); - WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); + WREG32_SOC15(GC, i, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, + 0); + WREG32_SOC15(GC, i, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, + 0); + WREG32_SOC15(GC, i, + regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); + WREG32_SOC15(GC, i, + regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); + } } static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) @@ -255,7 +294,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; unsigned num_level, block_size; uint32_t tmp; - int i; + int i, j; num_level = adev->vm_manager.num_level; block_size = adev->vm_manager.block_size; @@ -264,81 +303,89 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) else block_size -= 9; - for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL, i); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, - num_level); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, - 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = 
REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - PAGE_TABLE_BLOCK_SIZE, - block_size); - /* Send no-retry XNACK on fault to suppress VM fault storm. - * On Aldebaran, XNACK can be enabled in the SQ per-process. - * Retry faults need to be enabled for that to work. - */ - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - !adev->gmc.noretry || - adev->asic_type == CHIP_ALDEBARAN); - WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL, - i * hub->ctx_distance, tmp); - WREG32_SOC15_OFFSET(GC, 0, - regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, - i * hub->ctx_addr_distance, 0); - WREG32_SOC15_OFFSET(GC, 0, - regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, - i * hub->ctx_addr_distance, 0); - WREG32_SOC15_OFFSET(GC, 0, - regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, - i * hub->ctx_addr_distance, - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32_SOC15_OFFSET(GC, 0, - regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, - i * hub->ctx_addr_distance, - upper_32_bits(adev->vm_manager.max_pfn - 1)); + for (j = 0; j < adev->gfx.num_xcd; j++) { + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, + num_level); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, + 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + block_size); + /* Send no-retry XNACK on fault to suppress VM fault storm. + * On Aldebaran, XNACK can be enabled in the SQ per-process. + * Retry faults need to be enabled for that to work. 
+ */ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !adev->gmc.noretry || + adev->asic_type == CHIP_ALDEBARAN); + WREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(GC, j, + regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, j, + regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, j, + regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(GC, j, + regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } } } static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; - unsigned i; + unsigned i, j; - for (i = 0 ; i < 18; ++i) { - WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, - i * hub->eng_addr_distance, 0xffffffff); - WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, - i * hub->eng_addr_distance, 0x1f); + for (j = 0; j < adev->gfx.num_xcd; j++) { + for (i = 0 ; i < 18; ++i) { + WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } } } static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) { + int i; + + for (i = 0; i < adev->gfx.num_xcd; i++) { + if (amdgpu_sriov_vf(adev)) { /* * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are * VF copy registers so vbios post doesn't program them, for * SRIOV driver need to program them */ - WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); + WREG32_SOC15_RLC(GC, i, regMC_VM_FB_LOCATION_BASE, + adev->gmc.vram_start >> 24); + WREG32_SOC15_RLC(GC, i, regMC_VM_FB_LOCATION_TOP, + adev->gmc.vram_end >> 24); + } } /* GART Enable. 
*/ @@ -361,27 +408,29 @@ static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; u32 tmp; - u32 i; + u32 i, j; - /* Disable all tables */ - for (i = 0; i < 16; i++) - WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_CNTL, - i * hub->ctx_distance, 0); + for (j = 0; j < adev->gfx.num_xcd; j++) { + /* Disable all tables */ + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); - /* Setup TLB control */ - tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); - tmp = REG_SET_FIELD(tmp, - MC_VM_MX_L1_TLB_CNTL, - ENABLE_ADVANCED_DRIVER_MODEL, - 0); - WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); + /* Setup TLB control */ + tmp = RREG32_SOC15(GC, j, regMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, + MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, + 0); + WREG32_SOC15_RLC(GC, j, regMC_VM_MX_L1_TLB_CNTL, tmp); - /* Setup L2 cache */ - tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); - WREG32_SOC15(GC, 0, regVM_L2_CNTL, tmp); - WREG32_SOC15(GC, 0, regVM_L2_CNTL3, 0); + /* Setup L2 cache */ + tmp = RREG32_SOC15(GC, j, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(GC, j, regVM_L2_CNTL, tmp); + WREG32_SOC15(GC, j, regVM_L2_CNTL3, 0); + } } /** @@ -394,38 +443,42 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; - tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, - VM_L2_PROTECTION_FAULT_CNTL, - TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, - value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); - if (!value) { + int i; + + for (i = 0; i < adev->gfx.num_xcd; i++) { + tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - CRASH_ON_NO_RETRY_FAULT, 1); + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - CRASH_ON_RETRY_FAULT, 1); + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, + VM_L2_PROTECTION_FAULT_CNTL, + 
TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp); } - WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp); } static void gfxhub_v1_2_init(struct amdgpu_device *adev) From 1ffbc89c302669c95779c1e6b0901380544c9bb8 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 5 May 2023 15:37:31 +0800 Subject: [PATCH 195/861] drm/amdgpu: remove unneeded semicolon No functional modification involved. ./drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c:146:2-3: Unneeded semicolon. Reported-by: Abaci Robot Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=4871 Signed-off-by: Jiapeng Chong Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 24d12075ca3a..a331a59c49e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -143,7 +143,7 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan break; default: break; - }; + } return; } From 738b3469f8e12ae72555ef4724bebe8167a93e29 Mon Sep 17 00:00:00 2001 From: Sung Lee Date: Fri, 21 Apr 2023 12:03:16 -0400 Subject: [PATCH 196/861] drm/amd/display: Add additional pstate registers to HW state query [WHY] These registers would be useful to know when debugging pstate issues. [HOW] Add additional registers to hw state query. 
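As a hypothetical illustration of how the captured values could be consumed (the dump helper below is made up; the state structs and fields are the ones this patch extends):

/* Illustrative sketch only -- a debugfs-style dump of the new fields. */
static void example_dump_pstate_regs(struct seq_file *m,
				     const struct dcn_hubbub_state *hubbub_s,
				     const struct dcn_hubp_state *hubp_s)
{
	seq_printf(m, "ARB_WATERMARK_CHANGE_CNTL: 0x%08x\n",
		   hubbub_s->watermark_change_cntl);
	seq_printf(m, "ARB_DRAM_STATE_CNTL: 0x%08x\n",
		   hubbub_s->dram_state_cntl);
	seq_printf(m, "UCLK_PSTATE_FORCE: 0x%08x\n",
		   hubp_s->uclk_pstate_force);
	seq_printf(m, "DCHUBP_CNTL: 0x%08x\n", hubp_s->hubp_cntl);
}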
Reviewed-by: Aric Cyr Reviewed-by: Jun Lei Acked-by: Alex Hung Signed-off-by: Sung Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 2 ++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2 ++ 4 files changed, 16 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 0b17c2993ca5..09784222cc03 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -690,6 +690,8 @@ struct dcn_hubp_state { uint32_t primary_surface_addr_hi; uint32_t primary_meta_addr_lo; uint32_t primary_meta_addr_hi; + uint32_t uclk_pstate_force; + uint32_t hubp_cntl; }; struct dcn10_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 0ddd310cc971..6eebcb22e317 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -628,6 +628,12 @@ void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_s REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6); hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); } + + if (REG(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL)) + hubbub_state->watermark_change_cntl = REG_READ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL); + + if (REG(DCHUBBUB_ARB_DRAM_STATE_CNTL)) + hubbub_state->dram_state_cntl = REG_READ(DCHUBBUB_ARB_DRAM_STATE_CNTL); } static const struct hubbub_funcs hubbub2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index e46bbe7ddcc9..2861d974fcf6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -449,6 +449,12 @@ void hubp3_read_state(struct hubp *hubp) SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + if (REG(UCLK_PSTATE_FORCE)) + s->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE); + + if (REG(DCHUBP_CNTL)) + s->hubp_cntl = REG_READ(DCHUBP_CNTL); + } void hubp3_setup( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index aa80b3f2ca3f..aaa293613846 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -112,6 +112,8 @@ struct dcn_hubbub_state { uint32_t vm_error_pipe; uint32_t vm_error_mode; uint32_t test_debug_data; + uint32_t watermark_change_cntl; + uint32_t dram_state_cntl; }; struct hubbub_funcs { From b9274387bc2a4cf54b02e039b6a0aef5dd5f2936 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 26 Apr 2023 22:07:43 -0400 Subject: [PATCH 197/861] drm/amdkfd: Don't trigger evictions unmapping dmabuf attachments Don't move DMABuf attachments for PCIe P2P mappings to the SYSTEM domain when unmapping. This avoids triggering eviction fences unnecessarily. Instead do the move to SYSTEM and back to GTT when mapping these attachments to ensure the SG table gets updated after evictions. This may still trigger unnecessary evictions if user mode unmaps and remaps the same BO. However, this is unlikely in real applications. 
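Restated as a standalone sketch (illustrative only; it mirrors what kfd_mem_dmamap_dmabuf does after this patch): validating the BO to the CPU domain first invalidates any stale DMA mapping, so the follow-up validation to GTT rebuilds the SG table even if the BO was evicted since the last map.

/* Illustrative sketch only. */
static int example_remap_dmabuf(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	int ret;

	/* Bounce through the CPU domain to drop any stale mapping. */
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		return ret;

	/* Validate back to GTT, which re-creates the DMA mapping. */
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}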
Signed-off-by: Felix Kuehling Reviewed-by: Eric Huang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index c3990a5eb7c6..de6ba0d4b860 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -531,6 +531,12 @@ kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment) { struct ttm_operation_ctx ctx = {.interruptible = true}; struct amdgpu_bo *bo = attachment->bo_va->base.bo; + int ret; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + return ret; amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); @@ -663,11 +669,10 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, static void kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment) { - struct ttm_operation_ctx ctx = {.interruptible = true}; - struct amdgpu_bo *bo = attachment->bo_va->base.bo; - - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); - ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + /* This is a no-op. We don't want to trigger eviction fences when + * unmapping DMABufs. Therefore the invalidation (moving to system + * domain) is done in kfd_mem_dmamap_dmabuf. + */ } /** From 9e72813f69b178b676a54c4d6b24d3e84492b61a Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Sat, 6 May 2023 16:10:43 +0800 Subject: [PATCH 198/861] drm/amdgpu: Remove the unused variable golden_settings_gc_9_4_3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Variable golden_settings_gc_9_4_3 is never used, so delete it. drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:48:38: warning: ‘golden_settings_gc_9_4_3’ defined but not used. Reported-by: Abaci Robot Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=4877 Signed-off-by: Jiapeng Chong Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c9ae3065a13d..9d17dcfae130 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -45,10 +45,6 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L -static const struct soc15_reg_golden golden_settings_gc_9_4_3[] = { - -}; - static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev); From bf7fda0b3736f93ac8b18e7147e1e7acd27e6a19 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 25 Apr 2023 14:42:29 -0600 Subject: [PATCH 199/861] drm/amd/display: Show the DCN/DCE version in the log Sometimes people send their dmesg log for debugging, and one common task is to check the modesetting line to see which DCN/DCE we need to debug. This commit introduces a simple conversion from the DCN/DCE version to a string shown in the dmesg log. 
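For example, with this change a DCN 3.2 part would log a line along these lines (the DC version number below is made up for illustration; the format comes from the DRM_INFO call this patch modifies):

[drm] Display Core initialized with v3.2.212! DCN 3.2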
Reviewed-by: Hamza Mahfooz Acked-by: Alex Hung Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +- drivers/gpu/drm/amd/display/dc/dc_helper.c | 56 +++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dm_services.h | 2 + 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e278474e4a5b..e1c4db673dea 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1667,7 +1667,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.dc = dc_create(&init_data); if (adev->dm.dc) { - DRM_INFO("Display Core initialized with v%s!\n", DC_VER); + DRM_INFO("Display Core initialized with v%s! %s\n", DC_VER, + dce_version_to_string(adev->dm.dc->ctx->dce_version)); } else { DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); goto error; diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index a21948267c0f..3db7a2e11af5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -690,3 +690,59 @@ void reg_sequence_wait_done(const struct dc_context *ctx) dc_dmub_srv_wait_idle(ctx->dmub_srv); } } + +char *dce_version_to_string(const int version) +{ + switch (version) { + case DCE_VERSION_8_0: + return "DCE 8.0"; + case DCE_VERSION_8_1: + return "DCE 8.1"; + case DCE_VERSION_8_3: + return "DCE 8.3"; + case DCE_VERSION_10_0: + return "DCE 10.0"; + case DCE_VERSION_11_0: + return "DCE 11.0"; + case DCE_VERSION_11_2: + return "DCE 11.2"; + case DCE_VERSION_11_22: + return "DCE 11.22"; + case DCE_VERSION_12_0: + return "DCE 12.0"; + case DCE_VERSION_12_1: + return "DCE 12.1"; + case DCN_VERSION_1_0: + return "DCN 1.0"; + case DCN_VERSION_1_01: + return "DCN 1.0.1"; + case DCN_VERSION_2_0: + return "DCN 2.0"; + case DCN_VERSION_2_1: + return "DCN 2.1"; + case DCN_VERSION_2_01: + return "DCN 2.0.1"; + case DCN_VERSION_3_0: + return "DCN 3.0"; + case DCN_VERSION_3_01: + return "DCN 3.0.1"; + case DCN_VERSION_3_02: + return "DCN 3.0.2"; + case DCN_VERSION_3_03: + return "DCN 3.0.3"; + case DCN_VERSION_3_1: + return "DCN 3.1"; + case DCN_VERSION_3_14: + return "DCN 3.1.4"; + case DCN_VERSION_3_15: + return "DCN 3.1.5"; + case DCN_VERSION_3_16: + return "DCN 3.1.6"; + case DCN_VERSION_3_2: + return "DCN 3.2"; + case DCN_VERSION_3_21: + return "DCN 3.2.1"; + default: + return "Unknown"; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index d33d595405a9..d0eed3b4771e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -292,4 +292,6 @@ void dm_dtn_log_append_v(struct dc_context *ctx, void dm_dtn_log_end(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx); +char *dce_version_to_string(const int version); + #endif /* __DM_SERVICES_H__ */ From 187916e6ed9d0c3b3abc27429f7a5f8c936bd1f0 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Fri, 5 May 2023 20:14:15 +0800 Subject: [PATCH 200/861] drm/amdgpu: install stub fence into potential unused fence pointers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using cpu to update page tables, vm update fences are unused. 
Install a stub fence into these fence pointers instead of NULL to avoid a NULL pointer dereference when calling dma_fence_wait() on them. Suggested-by: Christian König Signed-off-by: Lang Yu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3c0310576b3b..b6bd667df676 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1358,6 +1358,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); bo_va->ref_count = 1; + bo_va->last_pt_update = dma_fence_get_stub(); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); @@ -2067,7 +2068,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->update_funcs = &amdgpu_vm_cpu_funcs; else vm->update_funcs = &amdgpu_vm_sdma_funcs; - vm->last_update = NULL; + + vm->last_update = dma_fence_get_stub(); vm->last_unlocked = dma_fence_get_stub(); vm->last_tlb_flush = dma_fence_get_stub(); @@ -2192,7 +2194,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) goto unreserve_bo; dma_fence_put(vm->last_update); - vm->last_update = NULL; + vm->last_update = dma_fence_get_stub(); vm->is_compute_context = true; /* Free the shadow bo for compute VM */ From 9d7a348d368ccd940242d4aa68292cf665f6d474 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 5 May 2023 13:20:11 +0800 Subject: [PATCH 201/861] drm/amd/pm: parse pp_handle under appropriate conditions amdgpu_dpm_is_overdrive_supported is a common API across all ASICs, so we should cast pp_handle into the correct structure under the different power frameworks. 
v2: using return directly to simplify code v3: SI asic does not carry od_enabled member in pp_handle, and update Fixes tag Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2541 Fixes: eb4900aa4c49 ("drm/amdgpu: Fix kernel NULL pointer dereference in dpm functions") Suggested-by: Mario Limonciello Signed-off-by: Guchun Chen Reviewed-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 300e156b924f..86246f69dbe1 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1460,15 +1460,21 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) { - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - struct smu_context *smu = adev->powerplay.pp_handle; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = adev->powerplay.pp_handle; - if ((is_support_sw_smu(adev) && smu->od_enabled) || - (is_support_sw_smu(adev) && smu->is_apu) || - (!is_support_sw_smu(adev) && hwmgr->od_enabled)) - return true; + return (smu->od_enabled || smu->is_apu); + } else { + struct pp_hwmgr *hwmgr; - return false; + /* SI asic does not carry od_enabled */ + if (adev->family == AMDGPU_FAMILY_SI) + return false; + + hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; + + return hwmgr->od_enabled; + } } int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, From db5dcd476eb0288cdfa781725582dcf9bd747a6b Mon Sep 17 00:00:00 2001 From: YuBiao Wang Date: Fri, 5 May 2023 14:35:32 +0800 Subject: [PATCH 202/861] drm/amdgpu: set default num_kcq to 2 under sriov MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The number of kernel queues has an impact on latency in the SR-IOV use case. So, to reduce latency, we set the default num_kcq = 2 under SR-IOV if it is not set manually. Signed-off-by: YuBiao Wang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 1311e42ab8e9..d0ad7cb0fa05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -68,6 +68,9 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) /* enable mcbp for sriov */ amdgpu_mcbp = 1; + /* Reduce kcq number to 2 to reduce latency */ + if (amdgpu_num_kcq == -1) + amdgpu_num_kcq = 2; } void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, From 273f47997718fc72d96e5a4a640538c00575fa7e Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Wed, 26 Apr 2023 16:02:28 -0400 Subject: [PATCH 203/861] drm/amd/display: Add symclk workaround during disable link output [Why & How] This was originally a change (9c75891f) in DCN32, made because of the lack of an interface to set TX while keeping symclk on. Adding this workaround to DCN314 resolves the current issue. 
Fixes: 9c75891feef0 ("drm/amd/display: rework recent update PHY state commit") Reviewed-by: Nicholas Kazlauskas Acked-by: Alex Hung Signed-off-by: Leo Chen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn314/dcn314_hwseq.c | 65 +++++++++++++++++++ .../drm/amd/display/dc/dcn314/dcn314_hwseq.h | 2 + .../drm/amd/display/dc/dcn314/dcn314_init.c | 2 +- 3 files changed, 68 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 6fb3f64e3057..f7a3e0d71d01 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -421,3 +421,68 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool PERF_TRACE(); } +static void apply_symclk_on_tx_off_wa(struct dc_link *link) +{ + /* There are use cases where SYMCLK is referenced by OTG. For instance + * for TMDS signal, OTG relies SYMCLK even if TX video output is off. + * However current link interface will power off PHY when disabling link + * output. This will turn off SYMCLK generated by PHY. The workaround is + * to identify such case where SYMCLK is still in use by OTG when we + * power off PHY. When this is detected, we will temporarily power PHY + * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling + * program_pix_clk interface. When OTG is disabled, we will then power + * off PHY by calling disable link output again. + * + * In future dcn generations, we plan to rework transmitter control + * interface so that we could have an option to set SYMCLK ON TX OFF + * state in one step without this workaround + */ + + struct dc *dc = link->ctx->dc; + struct pipe_ctx *pipe_ctx = NULL; + uint8_t i; + + if (link->phy_state.symclk_ref_cnts.otg > 0) { + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { + pipe_ctx->clock_source->funcs->program_pix_clk( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + dc->link_srv->dp_get_encoding_format( + &pipe_ctx->link_config.dp_link_settings), + &pipe_ctx->pll_settings); + link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; + break; + } + } + } +} + +void dcn314_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct dmcu *dmcu = dc->res_pool->dmcu; + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + + link_hwss->disable_link_output(link, link_res, signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + /* + * Add the logic to extract BOTH power up and power down sequences + * from enable/disable link output and only call edp panel control + * in enable_link_dp and disable_link_dp once. 
+ */ + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->unlock_phy(dmcu); + dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + + apply_symclk_on_tx_off_wa(link); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index c786d5e6a428..6d0b62503caa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -45,4 +45,6 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); +void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal); + #endif /* __DC_HWSS_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index ce53339b2e10..ed8a1b94c006 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, + .disable_link_output = dcn314_disable_link_output, .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, From 62cc621604a466ede81a125a2ed63e05695a48cc Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Wed, 26 Apr 2023 19:33:16 -0400 Subject: [PATCH 204/861] drm/amd/display: Check Vactive for VRR active for FPO + Vactive [Description] - For FPO + Vactive cases, we rely on the Vactive display to be at its nominal refresh rate because the Vactive pipe may not necessarily assert P-State allow while it's in VBLANK - For cases where the Vactive display has a stretched VBLANK due to VRR, we could underflow when trying to complete an FPO + Vactive MCLK switch because the FPO display has limited VBLANK time while waiting for the Vactive display to assert P-State allow naturally - Block FPO + Vactive if the Vactive display has VRR active (variable or fixed) Reviewed-by: Jun Lei Acked-by: Alex Hung Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 5 ++++- drivers/gpu/drm/amd/display/dc/dc_stream.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 7 ++++++- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 965b348dc8f1..c26dfdd48dd9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2604,7 +2604,7 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->stream && stream_update->stream->freesync_on_desktop && (stream_update->vrr_infopacket || stream_update->allow_freesync || - stream_update->vrr_active_variable)) + stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) su_flags->bits.fams_changed = 1; if (su_flags->raw != 0) @@ -2964,6 +2964,9 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vrr_active_variable) stream->vrr_active_variable = *update->vrr_active_variable; + if (update->vrr_active_fixed) + 
stream->vrr_active_fixed = *update->vrr_active_fixed; + if (update->crtc_timing_adjust) stream->adjust = *update->crtc_timing_adjust; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 0add5ecc895f..d5b3e3a32cc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -233,6 +233,7 @@ struct dc_stream_state { */ bool vrr_active_variable; bool freesync_on_desktop; + bool vrr_active_fixed; bool converter_disable_audio; uint8_t qs_bit; @@ -326,6 +327,7 @@ struct dc_stream_update { bool integer_scaling_update; bool *allow_freesync; bool *vrr_active_variable; + bool *vrr_active_fixed; struct colorspace_transform *gamut_remap; enum dc_color_space *output_color_space; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 826059d5b367..46fd7b68857c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -35,6 +35,7 @@ #define DC_LOGGER_INIT(logger) +static const unsigned int MAX_FPO_VACTIVE_BLANK_US = 600; static const struct subvp_high_refresh_list subvp_high_refresh_list = { .min_refresh = 120, .max_refresh = 165, @@ -2924,6 +2925,7 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint unsigned int i, pipe_idx; const struct vba_vars_st *vba = &context->bw_ctx.dml.vba; bool vactive_found = false; + unsigned int blank_us = 0; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -2931,7 +2933,10 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint if (!pipe->stream) continue; - if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us) { + blank_us = ((pipe->stream->timing.v_total - pipe->stream->timing.v_addressable) * pipe->stream->timing.h_total / + (double)(pipe->stream->timing.pix_clk_100hz * 100)) * 1000000; + if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us && + !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed) && blank_us < MAX_FPO_VACTIVE_BLANK_US) { vactive_found = true; break; } From 84f14428b1e0d1f61776c5fcfdef181129533e0b Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 27 Apr 2023 15:10:13 -0400 Subject: [PATCH 205/861] drm/amd/display: Enforce 60us prefetch for 200Mhz DCFCLK modes [Description] - Due to bandwidth / arbitration issues at 200Mhz DCFCLK, we want to enforce a minimum of 60us of prefetch to avoid intermittent underflow issues - Since 60us prefetch is already enforced for UCLK DPM0, and many DCFCLKs > 200Mhz are mapped to UCLK DPM1, in theory there should not be any UCLK DPM regressions by enforcing greater prefetch Reviewed-by: Nevenko Stupar Reviewed-by: Jun Lei Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Acked-by: Alex Hung Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 5 +++-- .../gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 66f44a013fe5..958d27224f64 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -810,7 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SwathHeightY[k], v->SwathHeightC[k], TWait, - v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ? + (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ || + v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->DSTXAfterScaler[k], @@ -3314,7 +3315,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->swath_width_chroma_ub_this_state[k], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->TWait, - v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ? + (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index 500b3dd6052d..d98e36a9a09c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -53,6 +53,7 @@ #define BPP_BLENDED_PIPE 0xffffffff #define MEM_STROBE_FREQ_MHZ 1600 +#define MIN_DCFCLK_FREQ_MHZ 200 #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0 struct display_mode_lib; From a00e595207d001432a85758954c3a6f6a9896368 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 27 Apr 2023 15:10:41 -0400 Subject: [PATCH 206/861] drm/amd/display: Block SubVP high refresh when VRR active fixed [Description] - SubVP high refresh is blocked when VRR is active variable, but we should also block it for when VRR is active fixed (video use case) Reviewed-by: Nevenko Stupar Reviewed-by: Jun Lei Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Acked-by: Alex Hung Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 46fd7b68857c..cd28980b2b56 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2825,7 +2825,7 @@ bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context uint32_t i; if (!dc->debug.disable_subvp_high_refresh && pipe->stream && - pipe->plane_state && !pipe->stream->vrr_active_variable) { + pipe->plane_state && !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) { refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); From 807a1c14276b6ba6dc7efb4784ac35bceea1413f Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 28 Apr 2023 17:29:02 -0400 Subject: [PATCH 207/861] drm/amd/display: Block SubVP on displays that have pixclk > 1800Mhz [Description] - Enabling SubVP on high refresh rate displays had a side effect of also enabling on high bandwidth displays such as 8K60 - However, these are not validated and should be blocked for the time being - Block SubVP on displays that have pix rate > 1800Mhz (includes 8K60 displays) Reviewed-by: Jun Lei 
Reviewed-by: Nevenko Stupar Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Acked-by: Alex Hung Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 1 + drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 04be01ae1ecf..42ccfd13a37c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -41,6 +41,7 @@ #define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq #define DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US 100 // Only allow FPO + Vactive if active margin >= 100 #define SUBVP_HIGH_REFRESH_LIST_LEN 3 +#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index cd28980b2b56..f7e45d935a29 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -703,6 +703,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * - Not TMZ surface */ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && + !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && pipe->stream->mall_stream_config.type == SUBVP_NONE && (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && From 2f48965bdc02d781181ee4fa3d5b3cc168e6d9af Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Thu, 4 May 2023 01:46:12 -0400 Subject: [PATCH 208/861] drm/amdgpu: drop gfx_v11_0_cp_ecc_error_irq_funcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The gfx.cp_ecc_error_irq is retired in gfx11. In gfx_v11_0_hw_fini still use amdgpu_irq_put to disable this interrupt, which caused the call trace in this function. [ 102.873958] Call Trace: [ 102.873959] [ 102.873961] gfx_v11_0_hw_fini+0x23/0x1e0 [amdgpu] [ 102.874019] gfx_v11_0_suspend+0xe/0x20 [amdgpu] [ 102.874072] amdgpu_device_ip_suspend_phase2+0x240/0x460 [amdgpu] [ 102.874122] amdgpu_device_ip_suspend+0x3d/0x80 [amdgpu] [ 102.874172] amdgpu_device_pre_asic_reset+0xd9/0x490 [amdgpu] [ 102.874223] amdgpu_device_gpu_recover.cold+0x548/0xce6 [amdgpu] [ 102.874321] amdgpu_debugfs_reset_work+0x4c/0x70 [amdgpu] [ 102.874375] process_one_work+0x21f/0x3f0 [ 102.874377] worker_thread+0x200/0x3e0 [ 102.874378] ? process_one_work+0x3f0/0x3f0 [ 102.874379] kthread+0xfd/0x130 [ 102.874380] ? 
kthread_complete_and_exit+0x20/0x20 [ 102.874381] ret_from_fork+0x22/0x30 v2: - Handle umc and gfx ras cases in separated patch - Retired the gfx_v11_0_cp_ecc_error_irq_funcs in gfx11 v3: - Improve the subject and code comments - Add judgment on gfx11 in the function of amdgpu_gfx_ras_late_init v4: - Drop the define of CP_ME1_PIPE_INST_ADDR_INTERVAL and SET_ECC_ME_PIPE_STATE which using in gfx_v11_0_set_cp_ecc_error_state - Check cp_ecc_error_irq.funcs rather than ip version for a more sustainable life v5: - Simplify judgment conditions Signed-off-by: Horatio Zhang Reviewed-by: Hawking Zhang Acked-by: Christian König Reviewed-by: Guchun Chen Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 8 +++-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 46 ------------------------- 2 files changed, 5 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 76dd2841cc0d..a22d88a4178a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -793,9 +793,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; - r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); - if (r) - goto late_fini; + if (adev->gfx.cp_ecc_error_irq.funcs) { + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); + if (r) + goto late_fini; + } } else { amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f09e2558e73b..7bff50e58cff 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1355,13 +1355,6 @@ static int gfx_v11_0_sw_init(void *handle) if (r) return r; - /* ECC error */ - r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, - GFX_11_0_0__SRCID__CP_ECC_ERROR, - &adev->gfx.cp_ecc_error_irq); - if (r) - return r; - /* FED error */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT, @@ -4369,7 +4362,6 @@ static int gfx_v11_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -5846,36 +5838,6 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev } } -#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1 -#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \ - do { \ - uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \ - tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \ - WREG32_SOC15_IP(GC, reg_addr, tmp); \ - } while (0) - -static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - uint32_t ecc_irq_state = 0; - uint32_t pipe0_int_cntl_addr = 0; - int i = 0; - - ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 
1 : 0; - - pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); - - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state); - - for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) - SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL, - ecc_irq_state); - - return 0; -} - static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -6292,11 +6254,6 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { .process = gfx_v11_0_priv_inst_irq, }; -static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = { - .set = gfx_v11_0_set_cp_ecc_error_state, - .process = amdgpu_gfx_cp_ecc_error_irq, -}; - static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = { .process = gfx_v11_0_rlc_gc_fed_irq, }; @@ -6312,9 +6269,6 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; - adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */ - adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs; - adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */ adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs; From 4994d1f0a754cded0afb62c4753d00760ddca511 Mon Sep 17 00:00:00 2001 From: "Lin.Cao" Date: Mon, 8 May 2023 17:28:41 +0800 Subject: [PATCH 209/861] drm/amdgpu: Fix vram recover doesn't work after whole GPU reset (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v1: Vmbo->shadow is used to back vram bo up when vram lost. So that we should set shadow as vmbo->shadow to recover vmbo->bo v2: Modify if(vmbo->shadow) shadow = vmbo->shadow as if(!vmbo->shadow) continue; Fixes: e18aaea733da ("drm/amdgpu: move shadow_list to amdgpu_bo_vm") Reviewed-by: Christian König Signed-off-by: Lin.Cao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ae0bcffa2591..e94507a10e15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4513,7 +4513,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) dev_info(adev->dev, "recover vram bo from shadow start\n"); mutex_lock(&adev->shadow_list_lock); list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { - shadow = &vmbo->bo; + /* If vm is compute context or adev is APU, shadow will be NULL */ + if (!vmbo->shadow) + continue; + shadow = vmbo->shadow; + /* No need to recover an evicted BO */ if (shadow->tbo.resource->mem_type != TTM_PL_TT || shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || From aaa07c0d08a37bf72cd73a3119ff99dc2bfece74 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 30 Apr 2023 15:54:14 -0400 Subject: [PATCH 210/861] drm/amd/display: Promote DAL to 3.2.235 This version brings along following fixes: - Block SubVP on displays that have pixclk > 1800Mhz - Block SubVP high refresh when VRR active fixed - Enforce 60us prefetch for 200Mhz DCFCLK modes - Check Vactive for VRR active for FPO + Vactive - Add symclk workaround during disable link output - Show the DCN/DCE version in the log - Add additional pstate registers to HW state query Acked-by: Alex Hung Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7e3f20a3a02f..f4448b545d70 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.234" +#define DC_VER "3.2.235" #define MAX_SURFACES 3 #define MAX_PLANES 6 From eb4f01784ec1d589629dd8c85044b7c07e5f6993 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Sat, 6 May 2023 16:52:59 +0800 Subject: [PATCH 211/861] drm/amdgpu: disable sdma ecc irq only when sdma RAS is enabled in suspend sdma_v4_0_ip is shared on a few asics, but in sdma_v4_0_hw_fini the driver unconditionally disables ecc_irq, which is only enabled on those asics that enable sdma ecc. This introduces a warning in the suspend cycle on chips that have sdma ip v4.0 but no sdma ecc, so this patch corrects that. [ 7283.166354] RIP: 0010:amdgpu_irq_put+0x45/0x70 [amdgpu] [ 7283.167001] RSP: 0018:ffff9a5fc3967d08 EFLAGS: 00010246 [ 7283.167019] RAX: ffff98d88afd3770 RBX: 0000000000000001 RCX: 0000000000000000 [ 7283.167023] RDX: 0000000000000000 RSI: ffff98d89da30390 RDI: ffff98d89da20000 [ 7283.167025] RBP: ffff98d89da20000 R08: 0000000000036838 R09: 0000000000000006 [ 7283.167028] R10: ffffd5764243c008 R11: 0000000000000000 R12: ffff98d89da30390 [ 7283.167030] R13: ffff98d89da38978 R14: ffffffff999ae15a R15: ffff98d880130105 [ 7283.167032] FS: 0000000000000000(0000) GS:ffff98d996f00000(0000) knlGS:0000000000000000 [ 7283.167036] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 7283.167039] CR2: 00000000f7a9d178 CR3: 00000001c42ea000 CR4: 00000000003506e0 [ 7283.167041] Call Trace: [ 7283.167046] [ 7283.167048] sdma_v4_0_hw_fini+0x38/0xa0 [amdgpu] [ 7283.167704] amdgpu_device_ip_suspend_phase2+0x101/0x1a0 [amdgpu] [ 7283.168296] amdgpu_device_suspend+0x103/0x180 [amdgpu] [ 7283.168875] amdgpu_pmops_freeze+0x21/0x60 [amdgpu] [ 7283.169464] pci_pm_freeze+0x54/0xc0 Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2522 Signed-off-by: Guchun Chen Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index b3cc04dd8653..9295ac7edd56 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1917,9 +1917,11 @@ static int sdma_v4_0_hw_fini(void *handle) return 0; } - for (i = 0; i < adev->sdma.num_instances; i++) { - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); + } } sdma_v4_0_ctx_switch_enable(adev, false); From 936e95a461c2606ac2ffae3bb0d991e7081eafd8 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Tue, 9 May 2023 09:36:49 +0800 Subject: [PATCH 212/861] drm/amd/pm: avoid potential UBSAN issue on legacy asics Prevent further dpm casting on legacy asics without od_enabled in amdgpu_dpm_is_overdrive_supported. This avoids UBSAN complaints in the init sequence.
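In outline, the hazard and the guard look like the sketch below (illustrative only; the real change adds an amdgpu_dpm_is_legacy_dpm() macro, and the function name here is made up):

/* On legacy dpm the driver stores the adev pointer itself as pp_handle,
 * so casting it to struct pp_hwmgr and reading od_enabled dereferences
 * memory that was never a pp_hwmgr -- which is what UBSAN flags.
 */
static bool overdrive_supported_sketch(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev->powerplay.pp_handle == adev)	/* legacy dpm: no hwmgr behind the handle */
		return false;

	hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
	return hwmgr->od_enabled;
}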
v2: add a macro to check legacy dpm instead of checking asic family/type v3: refine macro name for naming consistency Suggested-by: Evan Quan Signed-off-by: Guchun Chen Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 86246f69dbe1..078aaaa53162 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -36,6 +36,8 @@ #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) +#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) + int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1467,8 +1469,11 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) } else { struct pp_hwmgr *hwmgr; - /* SI asic does not carry od_enabled */ - if (adev->family == AMDGPU_FAMILY_SI) + /* + * dpm on some legacy asics don't carry od_enabled member + * as its pp_handle is casted directly from adev. + */ + if (amdgpu_dpm_is_legacy_dpm(adev)) return false; hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; From d97b02bb9c7aa3008d473d11001e1b45b7e0c7c6 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Sat, 6 May 2023 20:06:45 +0800 Subject: [PATCH 213/861] drm/amdgpu/gfx: disable gfx9 cp_ecc_error_irq only when enabling legacy gfx ras gfx9 cp_ecc_error_irq is only enabled when legacy gfx ras is assert. So in gfx_v9_0_hw_fini, interrupt disablement for cp_ecc_error_irq should be executed under such condition, otherwise, an amdgpu_irq_put calltrace will occur. [ 7283.170322] RIP: 0010:amdgpu_irq_put+0x45/0x70 [amdgpu] [ 7283.170964] RSP: 0018:ffff9a5fc3967d00 EFLAGS: 00010246 [ 7283.170967] RAX: ffff98d88afd3040 RBX: ffff98d89da20000 RCX: 0000000000000000 [ 7283.170969] RDX: 0000000000000000 RSI: ffff98d89da2bef8 RDI: ffff98d89da20000 [ 7283.170971] RBP: ffff98d89da20000 R08: ffff98d89da2ca18 R09: 0000000000000006 [ 7283.170973] R10: ffffd5764243c008 R11: 0000000000000000 R12: 0000000000001050 [ 7283.170975] R13: ffff98d89da38978 R14: ffffffff999ae15a R15: ffff98d880130105 [ 7283.170978] FS: 0000000000000000(0000) GS:ffff98d996f00000(0000) knlGS:0000000000000000 [ 7283.170981] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 7283.170983] CR2: 00000000f7a9d178 CR3: 00000001c42ea000 CR4: 00000000003506e0 [ 7283.170986] Call Trace: [ 7283.170988] [ 7283.170989] gfx_v9_0_hw_fini+0x1c/0x6d0 [amdgpu] [ 7283.171655] amdgpu_device_ip_suspend_phase2+0x101/0x1a0 [amdgpu] [ 7283.172245] amdgpu_device_suspend+0x103/0x180 [amdgpu] [ 7283.172823] amdgpu_pmops_freeze+0x21/0x60 [amdgpu] [ 7283.173412] pci_pm_freeze+0x54/0xc0 [ 7283.173419] ? 
__pfx_pci_pm_freeze+0x10/0x10 [ 7283.173425] dpm_run_callback+0x98/0x200 [ 7283.173430] __device_suspend+0x164/0x5f0 v2: drop gfx11 as it's fixed in a different solution by retiring cp_ecc_irq funcs(Hawking) Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2522 Signed-off-by: Guchun Chen Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4a7556099469..e093e83ae739 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3761,7 +3761,8 @@ static int gfx_v9_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); From c488a9370d5a1da71f7683e0d6da40a2ab0f1be3 Mon Sep 17 00:00:00 2001 From: Saleemkhan Jamadar Date: Tue, 9 May 2023 12:37:50 +0530 Subject: [PATCH 214/861] drm/amdgpu/jpeg: Remove harvest checking for JPEG3 Register CC_UVD_HARVESTING is obsolete for JPEG 3.1.2 Signed-off-by: Saleemkhan Jamadar Reviewed-by: Veerabadhran Gopalakrishnan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index c55e09432e26..1c2292cc5f2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle) switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 1, 1): + case IP_VERSION(3, 1, 2): break; default: harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); From 9a06655e73834819149466ae8170ffe53b23e6f8 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Mon, 8 May 2023 11:09:41 +0800 Subject: [PATCH 215/861] drm/amdgpu/display: Enable DC_FP for LoongArch LoongArch now provides kernel_fpu_begin() and kernel_fpu_end() that are used like the x86 counterparts in commit 2b3bd32ea3a22ea2d ("LoongArch: Provide kernel fpu functions"), so we can enable DC_FP on LoongArch for supporting more DCN devices. Signed-off-by: WANG Xuerui Signed-off-by: Huacai Chen Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 6 ++++-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 5 +++++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 06b438217c61..b015c7d19531 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -8,7 +8,7 @@ config DRM_AMD_DC depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64 select SND_HDA_COMPONENT if SND_HDA_CORE # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752 - select DRM_AMD_DC_FP if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG)) + select DRM_AMD_DC_FP if (X86 || LOONGARCH || PPC64 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG)) help Choose this option if you want to use the new display engine support for AMDGPU. 
This adds required support for Vega and diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index c42aa947c969..172aa10a8800 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -33,6 +33,8 @@ #include #elif defined(CONFIG_ARM64) #include +#elif defined(CONFIG_LOONGARCH) +#include #endif /** @@ -88,7 +90,7 @@ void dc_fpu_begin(const char *function_name, const int line) *pcpu += 1; if (*pcpu == 1) { -#if defined(CONFIG_X86) +#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) migrate_disable(); kernel_fpu_begin(); #elif defined(CONFIG_PPC64) @@ -128,7 +130,7 @@ void dc_fpu_end(const char *function_name, const int line) pcpu = get_cpu_ptr(&fpu_recursion_depth); *pcpu -= 1; if (*pcpu <= 0) { -#if defined(CONFIG_X86) +#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) kernel_fpu_end(); migrate_enable(); #elif defined(CONFIG_PPC64) diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 01db035589c5..77cf5545c94c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -38,6 +38,11 @@ ifdef CONFIG_ARM64 dml_rcflags := -mgeneral-regs-only endif +ifdef CONFIG_LOONGARCH +dml_ccflags := -mfpu=64 +dml_rcflags := -msoft-float +endif + ifdef CONFIG_CC_IS_GCC ifneq ($(call gcc-min-version, 70100),y) IS_OLD_GCC = 1 From 3fb9dd5fef7ccd756544713e6e84db1c525816c4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 9 May 2023 17:21:14 +0300 Subject: [PATCH 216/861] drm/amdgpu: release correct lock in amdgpu_gfx_enable_kgq() This function was releasing the incorrect lock on the error path. Reported-by: kernel test robot Fixes: 1156e1a60f02 ("drm/amdgpu: add [en/dis]able_kgq() functions") Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index a22d88a4178a..87e1a1a9f298 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -644,7 +644,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) adev->gfx.num_gfx_rings); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); - spin_unlock(&adev->gfx.kiq[0].ring_lock); + spin_unlock(&kiq->ring_lock); return r; } From a39b52c838127a42c57d5e080dbd5204770aa80e Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Tue, 9 May 2023 18:49:46 +0200 Subject: [PATCH 217/861] drm/amdgpu/gfx10: Disable gfxoff before disabling powergating. Otherwise we get a full system lock (looks like a FW mess). Copied the order from the GFX9 powergating code. Fixes: 366468ff6c34 ("drm/amdgpu: Allow GfxOff on Vangogh as default") Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2545 Signed-off-by: Bas Nieuwenhuizen Tested-by: Guilherme G. 
Piccoli Cc: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 2e0234b43f43..8e86b2c23c0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -8036,8 +8036,14 @@ static int gfx_v10_0_set_powergating_state(void *handle, case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 6): case IP_VERSION(10, 3, 7): + if (!enable) + amdgpu_gfx_off_ctrl(adev, false); + gfx_v10_cntl_pg(adev, enable); - amdgpu_gfx_off_ctrl(adev, enable); + + if (enable) + amdgpu_gfx_off_ctrl(adev, true); + break; default: break; From ee30b8001cd85ba0ef287b216eb1dad58bb58159 Mon Sep 17 00:00:00 2001 From: "Guilherme G. Piccoli" Date: Tue, 9 May 2023 18:49:47 +0200 Subject: [PATCH 218/861] drm/amdgpu/gfx11: Adjust gfxoff before powergating on gfx11 as well (Bas: speculative change to mirror gfx10/gfx9) Signed-off-by: Guilherme G. Piccoli Signed-off-by: Bas Nieuwenhuizen Cc: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 7bff50e58cff..f77779c31043 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -5076,8 +5076,14 @@ static int gfx_v11_0_set_powergating_state(void *handle, break; case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + if (!enable) + amdgpu_gfx_off_ctrl(adev, false); + gfx_v11_cntl_pg(adev, enable); - amdgpu_gfx_off_ctrl(adev, enable); + + if (enable) + amdgpu_gfx_off_ctrl(adev, true); + break; default: break; From 1d6ecab1ac0fdff8e62ff3ba506b606177010d08 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Tue, 9 May 2023 19:08:17 +0530 Subject: [PATCH 219/861] drm/amd/amdgpu: Fix warnings in amdgpu _object, _ring.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below warnings reported by checkpatch: WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: static const char * array should probably be static const char * const WARNING: space prohibited between function name and open parenthesis '(' WARNING: braces {} are not necessary for single statement blocks WARNING: Symbolic permissions 'S_IRUGO' are not preferred. Consider using octal permissions '0444'. 
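For reference, the preferred forms read roughly like this hand-written sketch (not taken from the diff below; all names are made up):

/* bare 'unsigned' becomes 'unsigned int'; a table of string literals is
 * 'static const char * const'; a single statement needs no braces; and
 * permissions are spelled in octal instead of S_IRUGO.
 */
static const char * const vram_names_sketch[] = { "GDDR6", "HBM2" };
static u32 dump_count_sketch;

static void dump_vram_names_sketch(unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count && i < ARRAY_SIZE(vram_names_sketch); i++)
		pr_info("%s\n", vram_names_sketch[i]);

	debugfs_create_u32("dump_count_sketch", 0444, NULL, &dump_count_sketch);
}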
Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 9 ++++----- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 2bd1a54ee866..9f0d5f02119e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -130,7 +130,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) u32 c = 0; if (domain & AMDGPU_GEM_DOMAIN_VRAM) { - unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; + unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; places[c].fpfn = 0; places[c].lpfn = 0; @@ -935,7 +935,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; amdgpu_bo_placement_from_domain(bo, domain); for (i = 0; i < bo->placement.num_placement; i++) { - unsigned fpfn, lpfn; + unsigned int fpfn, lpfn; fpfn = min_offset >> PAGE_SHIFT; lpfn = max_offset >> PAGE_SHIFT; @@ -1016,7 +1016,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo) } } -static const char *amdgpu_vram_names[] = { +static const char * const amdgpu_vram_names[] = { "UNKNOWN", "GDDR1", "DDR2", @@ -1148,8 +1148,8 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) * Returns: * 0 for success or a negative error code on failure. */ -int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, - uint32_t metadata_size, uint64_t flags) +int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata, + u32 metadata_size, uint64_t flags) { struct amdgpu_bo_user *ubo; void *buffer; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 1e9ae3bc59d3..d8a78de1f335 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -78,7 +78,7 @@ unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type) * Allocate @ndw dwords in the ring buffer (all asics). * Returns 0 on success, error on failure. 
*/ -int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) +int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) { /* Align requested size with padding so unlock_commit can * pad safely */ @@ -315,9 +315,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size; max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask; - if (WARN_ON(max_ibs_dw > max_dw)) { + if (WARN_ON(max_ibs_dw > max_dw)) max_dw = max_ibs_dw; - } ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); @@ -573,13 +572,13 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, char name[32]; sprintf(name, "amdgpu_ring_%s", ring->name); - debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring, + debugfs_create_file_size(name, S_IFREG | 0444, root, ring, &amdgpu_debugfs_ring_fops, ring->ring_size + 12); if (ring->mqd_obj) { sprintf(name, "amdgpu_mqd_%s", ring->name); - debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring, + debugfs_create_file_size(name, S_IFREG | 0444, root, ring, &amdgpu_debugfs_mqd_fops, ring->mqd_size); } From 6d99f3f4ea948f26b074a069c0406dd7f1cb0d8f Mon Sep 17 00:00:00 2001 From: Yifan Zhang Date: Wed, 10 May 2023 16:13:48 +0800 Subject: [PATCH 220/861] drm/amdgpu: change gfx 11.0.4 external_id range gfx 11.0.4 range starts from 0x80. Fixes: 311d52367d0a ("drm/amdgpu: add soc21 common ip block support for GC 11.0.4") Signed-off-by: Yifan Zhang Reported-by: Yogesh Mohan Marimuthu Acked-by: Alex Deucher Reviewed-by: Tim Huang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc21.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 0f82b8e83acb..6bff936a6e55 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -711,7 +711,7 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_JPEG; - adev->external_rev_id = adev->rev_id + 0x1; + adev->external_rev_id = adev->rev_id + 0x80; break; default: From cabbdea1f1861098991768d7bbf5a49ed1608213 Mon Sep 17 00:00:00 2001 From: Daniil Dulov Date: Thu, 11 May 2023 04:23:14 -0700 Subject: [PATCH 221/861] drm/amdkfd: Fix potential deallocation of previously deallocated memory. Pointer mqd_mem_obj can be deallocated in kfd_gtt_sa_allocate(). The function then returns non-zero value, which causes the second deallocation. Found by Linux Verification Center (linuxtesting.org) with SVACE. 
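Reduced to a sketch, the bug class looks like this (simplified; sub_allocate() is a hypothetical stand-in for kfd_gtt_sa_allocate()):

static struct kfd_mem_obj *allocate_sketch(struct kfd_dev *kfd, size_t size)
{
	struct kfd_mem_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int retval;

	if (!obj)
		return NULL;

	/* hypothetical callee that frees obj itself on its failure path */
	retval = sub_allocate(kfd, size, &obj);
	if (retval) {
		kfree(obj);	/* BUG: second free -- obj was already released */
		return NULL;
	}
	return obj;
}

The fix is to release the buffer exactly once, on the path that still owns it.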
Fixes: d1f8f0d17d40 ("drm/amdkfd: Move non-sdma mqd allocation out of init_mqd") Signed-off-by: Daniil Dulov Signed-off-by: Felix Kuehling Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index fdbfd725841f..51b53110341b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -115,18 +115,19 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, &(mqd_mem_obj->gtt_mem), &(mqd_mem_obj->gpu_addr), (void *)&(mqd_mem_obj->cpu_ptr), true); + + if (retval) { + kfree(mqd_mem_obj); + return NULL; + } } else { retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd), &mqd_mem_obj); - } - - if (retval) { - kfree(mqd_mem_obj); - return NULL; + if (retval) + return NULL; } return mqd_mem_obj; - } static void init_mqd(struct mqd_manager *mm, void **mqd, From 82ad22bbad008f84ec52208c5ba2c8f1cf55fd8d Mon Sep 17 00:00:00 2001 From: Jesse Zhang Date: Fri, 12 May 2023 09:29:17 +0800 Subject: [PATCH 222/861] drm/amdgpu: Differentiate between Raven2 and Raven/Picasso according to revision id Raven2 and Raven/Picasso may share the same GC_HWIP version, so differentiate them by revision id instead. Signed-off-by: shanshengwang Signed-off-by: Jesse Zhang Acked-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 37 ++++++++++++--------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index e093e83ae739..46577b59cb04 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4000,30 +4000,25 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) clock = clock_lo | (clock_hi << 32ULL); break; case IP_VERSION(9, 1, 0): - preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ - if (hi_check != clock_hi) { - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - clock_hi = hi_check; - } - preempt_enable(); - clock = clock_lo | (clock_hi << 32ULL); - break; case IP_VERSION(9, 2, 2): preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds.
- */ - if (hi_check != clock_hi) { + if (adev->rev_id >= 0x8) { + clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); + hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); + } else { + clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); + hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); + } + /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + if (adev->rev_id >= 0x8) + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); + else + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); clock_hi = hi_check; } preempt_enable(); From e84e40dcb3fc972cbe41d228f1e45128285e0c47 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 12 May 2023 15:04:51 +0800 Subject: [PATCH 223/861] drm/amd/display: Simplify the calculation of variables ./drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c:586:37-39: WARNING !A || A && B is equivalent to !A || B. ./drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c:595:37-39: WARNING !A || A && B is equivalent to !A || B. Reported-by: Abaci Robot Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=4941 Signed-off-by: Jiapeng Chong Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 4950eaa4406b..2de910e0ce75 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -583,8 +583,8 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (!pipe->stream || (pipe->stream && !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || - pipe->stream->fpo_in_use))) { + if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || + pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); } @@ -592,7 +592,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) /* Today only FPO uses cursor P-State force. Only clear cursor P-State force * if it's not FPO. */ - if (!pipe->stream || (pipe->stream && !pipe->stream->fpo_in_use)) { + if (!pipe->stream || !pipe->stream->fpo_in_use) { if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false); } From 17d62410aec363ec972f532fed5aba89b3f59ae7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 11 May 2023 10:40:03 -0400 Subject: [PATCH 224/861] drm/amdgpu/gmc11: implement get_vbios_fb_size() Implement get_vbios_fb_size() so we can properly reserve the vbios splash screen to avoid potential artifacts on the screen during the transition from the pre-OS console to the OS console. 
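The size computation in the hunk below follows the usual pattern for these helpers: if the VGA mode bit is set, fall back to the fixed VGA allocation; otherwise the footprint is viewport height times surface pitch times 4 bytes, assuming a 32bpp pre-OS surface. As plain arithmetic (register reads omitted; names made up):

static unsigned int vbios_fb_bytes_sketch(unsigned int viewport_height,
					  unsigned int pitch_pixels)
{
	return viewport_height * pitch_pixels * 4;	/* 4 bytes per pixel at 32bpp */
}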
Acked-by: Sunil Khatri Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index f73c238f3145..2f570fb5febe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -31,6 +31,8 @@ #include "umc_v8_10.h" #include "athub/athub_3_0_0_sh_mask.h" #include "athub/athub_3_0_0_offset.h" +#include "dcn/dcn_3_2_0_offset.h" +#include "dcn/dcn_3_2_0_sh_mask.h" #include "oss/osssys_6_0_0_offset.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "navi10_enum.h" @@ -546,7 +548,24 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) { - return 0; + u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = AMDGPU_VBIOS_VGA_ALLOCATION; + } else { + u32 viewport; + u32 pitch; + + viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); + pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * + 4); + } + + return size; } static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = { From 6e87c4229513904295674b84b6e2d12951567191 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Mon, 24 Apr 2023 14:27:26 -0500 Subject: [PATCH 225/861] drm/amdgpu: improve wait logic at fence polling Accomplish this by reading the seq number right away instead of sleeping for 5us first; there are certain cases where the fence is ready almost immediately. The sleep granularity was also reduced, as the majority of kiq tlb flushes take between 2us and 6us. Signed-off-by: Alex Sierra Acked-by: Felix Kuehling Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 1994eafd3e71..b7d648dd0170 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -376,14 +376,11 @@ signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, uint32_t wait_seq, signed long timeout) { - uint32_t seq; - - do { - seq = amdgpu_fence_read(ring); - udelay(5); - timeout -= 5; - } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0); + while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) { + udelay(2); + timeout -= 2; + } return timeout > 0 ? timeout : 0; } /** From 3083b1007d4b8d377f8e2b5ce349a275a2fff725 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Tue, 9 May 2023 16:15:27 +0800 Subject: [PATCH 226/861] drm/amdgpu: skip disabling fence driver src_irqs when device is unplugged When performing device unbind or halt, we have already disabled all irqs at the very beginning, as in amdgpu_pci_remove or amdgpu_device_halt. So amdgpu_irq_put should not be called any more for the irqs stored in the fence driver; otherwise the calltrace below is triggered.
[ 139.114088] WARNING: CPU: 2 PID: 1550 at drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c:616 amdgpu_irq_put+0xf6/0x110 [amdgpu] [ 139.114655] Call Trace: [ 139.114655] [ 139.114657] amdgpu_fence_driver_hw_fini+0x93/0x130 [amdgpu] [ 139.114836] amdgpu_device_fini_hw+0xb6/0x350 [amdgpu] [ 139.114955] amdgpu_driver_unload_kms+0x51/0x70 [amdgpu] [ 139.115075] amdgpu_pci_remove+0x63/0x160 [amdgpu] [ 139.115193] ? __pm_runtime_resume+0x64/0x90 [ 139.115195] pci_device_remove+0x3a/0xb0 [ 139.115197] device_remove+0x43/0x70 [ 139.115198] device_release_driver_internal+0xbd/0x140 Signed-off-by: Guchun Chen Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index b7d648dd0170..928814492d5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -578,7 +578,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) if (r) amdgpu_fence_driver_force_completion(ring); - if (ring->fence_drv.irq_src) + if (!drm_dev_is_unplugged(adev_to_drm(adev)) && + ring->fence_drv.irq_src) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); From 572773992e31e0e31692adf6797a3bf7e094097c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 11 May 2023 15:41:27 +0800 Subject: [PATCH 227/861] drm/amd/pm: fix possible power mode mismatch between driver and PMFW PMFW may boots the ASIC with a different power mode from the system's real one. Notify PMFW explicitly the power mode the system in. This is needed only when ACDC switch via gpio is not supported. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 18 +++++++++++++++++ .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 20 +------------------ .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1 + 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 5633c5797e85..2ddf5198e5c4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -733,6 +733,24 @@ static int smu_late_init(void *handle) return ret; } + /* + * Explicitly notify PMFW the power mode the system in. Since + * the PMFW may boot the ASIC with a different mode. + * For those supporting ACDC switch via gpio, PMFW will + * handle the switch automatically. Driver involvement + * is unnecessary. + */ + if (!smu->dc_controlled_by_gpio) { + ret = smu_set_power_source(smu, + adev->pm.ac_power ? SMU_POWER_SOURCE_AC : + SMU_POWER_SOURCE_DC); + if (ret) { + dev_err(adev->dev, "Failed to switch to %s mode!\n", + adev->pm.ac_power ? 
"AC" : "DC"); + return ret; + } + } + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index c4000518dc56..275f708db636 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3413,26 +3413,8 @@ static int navi10_post_smu_init(struct smu_context *smu) return 0; ret = navi10_run_umc_cdr_workaround(smu); - if (ret) { + if (ret) dev_err(adev->dev, "Failed to apply umc cdr workaround!\n"); - return ret; - } - - if (!smu->dc_controlled_by_gpio) { - /* - * For Navi1X, manually switch it to AC mode as PMFW - * may boot it with DC mode. - */ - ret = smu_v11_0_set_power_source(smu, - adev->pm.ac_power ? - SMU_POWER_SOURCE_AC : - SMU_POWER_SOURCE_DC); - if (ret) { - dev_err(adev->dev, "Failed to switch to %s mode!\n", - adev->pm.ac_power ? "AC" : "DC"); - return ret; - } - } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 3d9ff46706fb..98a33f8ee209 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1770,6 +1770,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost, .get_power_limit = smu_v13_0_7_get_power_limit, .set_power_limit = smu_v13_0_set_power_limit, + .set_power_source = smu_v13_0_set_power_source, .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode, .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode, .set_tool_table_location = smu_v13_0_set_tool_table_location, From f1822392128e00c1ae1579fe050ae3797db6ea02 Mon Sep 17 00:00:00 2001 From: Mustapha Ghaddar Date: Fri, 28 Apr 2023 12:13:21 -0400 Subject: [PATCH 228/861] drm/amd/display: enable dpia validate Use dpia_validate_usb4_bw() function Fixes: a8b537605e22 ("drm/amd/display: Add function pointer for validate bw usb4") Reviewed-by: Roman Li Reviewed-by: Meenakshikumar Somasundaram Acked-by: Aurabindo Pillai Signed-off-by: Mustapha Ghaddar Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/link/link_validation.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index d4b7da526f0a..e8b2fc4002a5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -359,5 +359,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un link[i] = stream[i].link; bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing); } + + ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); + return ret; } From 1d8355ad922423c9f765a644ed04526a6273d9ee Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Tue, 2 May 2023 09:54:56 -0400 Subject: [PATCH 229/861] drm/amd/display: Only skip update for DCFCLK, UCLK, FCLK on overclock [Description] - Update clocks is skipped in the GPU overclock sequence - However, we still need to update DISPCLK, DPPCLK, and DTBCLK because the GPU overclock sequence could temporarily disable ODM 2:1 combine because we disable all planes in the sequence Reviewed-by: Jun Lei Acked-by: Aurabindo Pillai Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- 
.../display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 24 +++++++++++-------- drivers/gpu/drm/amd/display/dc/dc.h | 7 +++++- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 85e963ec25ab..1df623b298a9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -460,9 +460,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, bool p_state_change_support; bool fclk_p_state_change_support; - if (dc->work_arounds.skip_clock_update) - return; - if (clk_mgr_base->clks.dispclk_khz == 0 || (dc->debug.force_clock_mode & 0x1)) { /* This is from resume or boot up, if forced_clock cfg option used, @@ -489,7 +486,8 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, fclk_p_state_change_support = new_clocks->fclk_p_state_change_support; - if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) { + if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) && + !dc->work_arounds.clock_update_disable_mask.fclk) { clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support; /* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */ @@ -503,12 +501,14 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ? new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000); - if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) && + !dc->work_arounds.clock_update_disable_mask.dcfclk) { clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz)); } - if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) && + !dc->work_arounds.clock_update_disable_mask.dcfclk_ds) { clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz)); } @@ -527,7 +527,8 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, } p_state_change_support = new_clocks->p_state_change_support; - if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { + if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) && + !dc->work_arounds.clock_update_disable_mask.uclk) { clk_mgr_base->clks.p_state_change_support = p_state_change_support; /* to disable P-State switching, set UCLK min = max */ @@ -541,20 +542,23 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, update_fclk = true; } - if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) { + if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk && + 
!dc->work_arounds.clock_update_disable_mask.fclk) { /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED); } /* Always update saved value, even if new value not set due to P-State switching unsupported */ - if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz) && + !dc->work_arounds.clock_update_disable_mask.uclk) { clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz; update_uclk = true; } /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */ if (clk_mgr_base->clks.p_state_change_support && - (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support)) + (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) && + !dc->work_arounds.clock_update_disable_mask.uclk) dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); if (clk_mgr_base->clks.num_ways != new_clocks->num_ways && diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index f4448b545d70..e89de1078964 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -272,8 +272,13 @@ struct dc_bug_wa { bool dedcn20_305_wa; bool skip_clock_update; bool lt_early_cr_pattern; + struct { + uint8_t uclk : 1; + uint8_t fclk : 1; + uint8_t dcfclk : 1; + uint8_t dcfclk_ds: 1; + } clock_update_disable_mask; }; - struct dc_dcc_surface_param { struct dc_size surface_size; enum surface_pixel_format format; From 6ba5a269cdc9f447be882bbf99548361c8ebc254 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Tue, 2 May 2023 12:27:26 -0400 Subject: [PATCH 230/861] drm/amd/display: Update vactive margin and max vblank for fpo + vactive [Description] - Some 1920x1080@60hz displays have VBLANK time > 600us which we still want to accept for FPO + Vactive configs based on testing - Increase max VBLANK time to 1000us to allow these configs for FPO + Vactive - Increase minimum vactive switch margin for FPO + Vactive to 200us - Based on testing, 1920x1080@120hz can have a switch margin of ~160us which requires significantly longer FPO stretch margin (5ms) which we don't want to accept for now - Also move margins into debug option Reviewed-by: Jun Lei Reviewed-by: Nevenko Stupar Acked-by: Aurabindo Pillai Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 ++ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 2 ++ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 1 - drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 3 +-- 6 files changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index e89de1078964..1ebb8d3573f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -893,6 +893,8 @@ struct dc_debug_options { bool minimize_dispclk_using_odm; bool disable_subvp_high_refresh; bool disable_dp_plus_plus_wa; + uint32_t fpo_vactive_min_active_margin_us; + uint32_t fpo_vactive_max_blank_us; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 4de2f8813dce..98c394f9f8cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -730,6 +730,8 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_boot_optimizations = false, .disable_subvp_high_refresh = true, .disable_dp_plus_plus_wa = true, + .fpo_vactive_min_active_margin_us = 200, + .fpo_vactive_max_blank_us = 1000, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 42ccfd13a37c..58826e0aa76e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -39,7 +39,6 @@ #define DCN3_2_MBLK_HEIGHT_8BPE 64 #define DCN3_2_VMIN_DISPCLK_HZ 717000000 #define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq -#define DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US 100 // Only allow FPO + Vactive if active margin >= 100 #define SUBVP_HIGH_REFRESH_LIST_LEN 3 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index df912c333bbd..a8082580df92 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -626,7 +626,7 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre DC_FP_END(); DC_FP_START(); - is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US); + is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us); DC_FP_END(); if (!is_fpo_vactive || dc->debug.disable_fpo_vactive) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 4c1e0f5a5f09..f4cd9749ffdf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -728,6 +728,8 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_fpo_vactive = false, .disable_boot_optimizations = false, .disable_subvp_high_refresh = true, + .fpo_vactive_min_active_margin_us = 200, + .fpo_vactive_max_blank_us = 1000, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index f7e45d935a29..8c60b88c7d1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -35,7 +35,6 @@ #define DC_LOGGER_INIT(logger) -static const unsigned int MAX_FPO_VACTIVE_BLANK_US = 600; static const struct subvp_high_refresh_list subvp_high_refresh_list = { .min_refresh = 120, .max_refresh = 165, @@ -2937,7 +2936,7 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint blank_us = ((pipe->stream->timing.v_total - pipe->stream->timing.v_addressable) * pipe->stream->timing.h_total / (double)(pipe->stream->timing.pix_clk_100hz * 100)) * 1000000; if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us && - !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed) && blank_us < 
MAX_FPO_VACTIVE_BLANK_US) { + !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed) && blank_us < dc->debug.fpo_vactive_max_blank_us) { vactive_found = true; break; } From 9c384ee8f997d0646844cd431f1c23e6dbf84a98 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 26 Apr 2023 18:17:13 -0600 Subject: [PATCH 231/861] drm/amd/display: Convert connector signal id to string To improve the readability of the log, this commit introduces a function that converts the signal type id to a human-readable string. Reviewed-by: Jerry Zuo Acked-by: Aurabindo Pillai Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/link/link_factory.c | 6 ++-- .../drm/amd/display/include/signal_types.h | 28 +++++++++++++++++++ 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 1515c817f03b..ac1c3e2e7c1d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -563,11 +563,9 @@ static bool construct_phy(struct dc_link *link, goto create_fail; } - /* TODO: #DAL3 Implement id to str function.*/ - LINK_INFO("Connector[%d] description:" "signal %d\n", + LINK_INFO("Connector[%d] description: signal: %s\n", init_params->connector_index, - link->connector_signal); + signal_type_to_string(link->connector_signal)); ddc_service_init_data.ctx = link->ctx; ddc_service_init_data.id = link->link_id; diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 23a308c3eccb..325c5ba4c82a 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -44,6 +44,34 @@ enum signal_type { SIGNAL_TYPE_VIRTUAL = (1 << 9), /* Virtual Display */ }; +static inline const char *signal_type_to_string(const int type) +{ + switch (type) { + case SIGNAL_TYPE_NONE: + return "No signal"; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + return "DVI: Single Link"; + case SIGNAL_TYPE_DVI_DUAL_LINK: + return "DVI: Dual Link"; + case SIGNAL_TYPE_HDMI_TYPE_A: + return "HDMI: TYPE A"; + case SIGNAL_TYPE_LVDS: + return "LVDS"; + case SIGNAL_TYPE_RGB: + return "RGB"; + case SIGNAL_TYPE_DISPLAY_PORT: + return "Display Port"; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + return "Display Port: MST"; + case SIGNAL_TYPE_EDP: + return "Embedded Display Port"; + case SIGNAL_TYPE_VIRTUAL: + return "Virtual"; + default: + return "Unknown"; + } +} + /* help functions for signal types manipulation */ static inline bool dc_is_hdmi_tmds_signal(enum signal_type signal) { From 1a4bcdbea4319efeb26cc4b05be859a7867e02dc Mon Sep 17 00:00:00 2001 From: Daniel Miess Date: Tue, 25 Apr 2023 14:29:48 -0400 Subject: [PATCH 232/861] drm/amd/display: Fix possible underflow for displays with large vblank [Why] Underflow observed when using a display with a large vblank region and low refresh rate [How] Simplify calculation of vblank_nom Increase value for VBlankNomDefaultUS to 800us Reviewed-by: Jun Lei Acked-by: Aurabindo Pillai Signed-off-by: Daniel Miess Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/dml/dcn314/dcn314_fpu.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 1d00eb9e73c6..554152371eb5 100644 ---
a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -33,7 +33,7 @@ #include "dml/display_mode_vba.h" struct _vcs_dpi_ip_params_st dcn3_14_ip = { - .VBlankNomDefaultUS = 668, + .VBlankNomDefaultUS = 800, .gpuvm_enable = 1, .gpuvm_max_page_table_levels = 1, .hostvm_enable = 1, @@ -286,7 +286,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; bool upscaled = false; - bool isFreesyncVideo = false; + const unsigned int max_allowed_vblank_nom = 1023; dc_assert_fp_enabled(); @@ -300,16 +300,11 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; - isFreesyncVideo = pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min; - isFreesyncVideo = isFreesyncVideo && pipe->stream->adjust.v_total_min > timing->v_total; - - if (!isFreesyncVideo) { - pipes[pipe_cnt].pipe.dest.vblank_nom = - dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0)); - } else { - pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; - pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; - } + pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; + pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS); + pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width); + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom); if (pipe->plane_state && (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || From d205a800a66e46430ab93c0d450393233d39931a Mon Sep 17 00:00:00 2001 From: "Leo (Hanghong) Ma" Date: Wed, 12 Apr 2023 14:02:01 -0400 Subject: [PATCH 233/861] drm/amd/display: Add visual confirm color support for MCLK switch [Why && How] We would like to have visual confirm color support for MCLK switch. 1. Set visual confirm color to yellow: Vblank MCLK switch. 2. Set visual confirm color to cyan: FPO + Vblank MCLK switch. 3. Set visual confirm color to pink: Vactive MCLK switch. 
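For reference, the color mapping above can be condensed into a small stand-alone
sketch. This is illustrative only: the enum and helper names below are not part
of the driver, and the in-tree implementation added by this patch,
get_mclk_switch_visual_confirm_color(), additionally derives the switch mode
from the DML context and layers the SubVP colors on top.

	#include <stdint.h>

	/* Full-scale timing-generator color component (assumed value). */
	#define MAX_TG_COLOR_VALUE 0xFFFF

	/* Hypothetical enum naming the three cases listed above. */
	enum mclk_switch_mode {
		MCLK_SWITCH_VBLANK,	/* plain VBLANK switch */
		MCLK_SWITCH_FPO_VBLANK,	/* FPO + VBLANK switch */
		MCLK_SWITCH_VACTIVE,	/* VACTIVE switch */
	};

	struct rgb16 {
		uint16_t r, g, b;
	};

	/* Map an MCLK switch mode to its visual confirm color. */
	static struct rgb16 mclk_switch_confirm_color(enum mclk_switch_mode mode)
	{
		const uint16_t v = MAX_TG_COLOR_VALUE;
		struct rgb16 c = { 0, 0, 0 };

		switch (mode) {
		case MCLK_SWITCH_VBLANK:	/* yellow = R + G */
			c.r = v;
			c.g = v;
			break;
		case MCLK_SWITCH_FPO_VBLANK:	/* cyan = G + B */
			c.g = v;
			c.b = v;
			break;
		case MCLK_SWITCH_VACTIVE:	/* pink = R + B */
			c.r = v;
			c.b = v;
			break;
		}
		return c;
	}

Yellow is R+G at full scale, cyan is G+B, and pink is R+B, matching cases 1-3
above and the dc_hw_sequencer.c hunk below.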
Reviewed-by: Jun Lei Acked-by: Aurabindo Pillai Signed-off-by: Leo (Hanghong) Ma Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 47 +++++++++++++++-- .../drm/amd/display/dc/core/dc_hw_sequencer.c | 50 +++++++++++++++++-- drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 22 +++----- .../amd/display/dc/dcn10/dcn10_hw_sequencer.h | 1 - .../drm/amd/display/dc/dcn20/dcn20_hwseq.c | 26 +--------- .../drm/amd/display/dc/dcn20/dcn20_hwseq.h | 5 -- .../gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 2 +- .../drm/amd/display/dc/dcn201/dcn201_hwseq.c | 4 +- .../drm/amd/display/dc/dcn201/dcn201_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 2 +- .../drm/amd/display/dc/dcn301/dcn301_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 2 +- .../drm/amd/display/dc/dcn314/dcn314_init.c | 2 +- .../gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 2 +- .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 7 +++ .../gpu/drm/amd/display/dc/inc/core_types.h | 2 + .../gpu/drm/amd/display/dc/inc/hw_sequencer.h | 9 +++- 19 files changed, 125 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c26dfdd48dd9..2c639cd346d7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1119,6 +1119,33 @@ static void phantom_pipe_blank( hws->funcs.wait_for_blank_complete(opp); } +static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +{ + if (dc->ctx->dce_version >= DCN_VERSION_1_0) { + memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); + + if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) + get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) + get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) + get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else { + if (dc->ctx->dce_version < DCN_VERSION_2_0) + color_space_to_black_color( + dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color)); + } + if (dc->ctx->dce_version >= DCN_VERSION_2_0) { + if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) + get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) + get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) + get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + } + } +} + static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; @@ -1189,6 +1216,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); + if (pipe->stream && pipe->plane_state) + dc_update_visual_confirm_color(dc, context, pipe); + if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); @@ -3456,6 +3486,14 @@ static void commit_planes_for_stream(struct dc *dc, } } + if (dc->debug.visual_confirm) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + dc_update_visual_confirm_color(dc, context, pipe); + } + if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; struct pipe_ctx *odm_pipe; @@ -3539,15 +3577,14 @@ static void commit_planes_for_stream(struct dc *dc, for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP && + if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || + dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && pipe_ctx->stream && pipe_ctx->plane_state) { - /* Only update visual confirm for SUBVP here. + /* Only update visual confirm for SUBVP and Mclk switching here. * The bar appears on all pipes, so we need to update the bar on all displays, * so the information doesn't get stale. */ - struct mpcc_blnd_cfg blnd_cfg = { 0 }; - - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, pipe_ctx->plane_res.hubp->inst); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 2acbf692193f..8a98b8dd008e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -421,6 +421,7 @@ void get_hdr_visual_confirm_color( void get_subvp_visual_confirm_color( struct dc *dc, + struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { @@ -428,15 +429,17 @@ void get_subvp_visual_confirm_color( bool enable_subvp = false; int i; - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx) + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context) return; for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { /* SubVP enable - red */ + color->color_g_y = 0; + color->color_b_cb = 0; color->color_r_cr = color_value; enable_subvp = true; @@ -448,12 +451,51 @@ void get_subvp_visual_confirm_color( if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { color->color_r_cr = 0; - if (pipe_ctx->stream->ignore_msa_timing_param == 1) + if (pipe_ctx->stream->allow_freesync == 1) { /* SubVP enable and DRR on - green */ + color->color_b_cb = 0; color->color_g_y = color_value; - else + } else { /* SubVP enable and No DRR - blue */ + color->color_g_y = 0; color->color_b_cb = color_value; + } + } } + +void get_mclk_switch_visual_confirm_color( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) + return; + + if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != + dm_dram_clock_change_unsupported) { + /* MCLK switching is supported */ + if (!pipe_ctx->has_vactive_margin) { + /* In Vblank - yellow */ + color->color_r_cr = color_value; + color->color_g_y = color_value; + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + /* FPO + Vblank - cyan */ +
color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = color_value; + } + } else { + /* In Vactive - pink */ + color->color_r_cr = color_value; + color->color_b_cb = color_value; + } + /* SubVP */ + get_subvp_visual_confirm_color(dc, context, pipe_ctx, color); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1ebb8d3573f4..8be2e6d6d888 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -426,6 +426,7 @@ enum visual_confirm { VISUAL_CONFIRM_FAMS = 7, VISUAL_CONFIRM_SWIZZLE = 9, VISUAL_CONFIRM_SUBVP = 14, + VISUAL_CONFIRM_MCLK_SWITCH = 16, }; enum dc_psr_power_opts { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a7ad1d7bc43e..905246a2ece4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2602,23 +2602,15 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } -void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) +void dcn10_update_visual_confirm_color(struct dc *dc, + struct pipe_ctx *pipe_ctx, + int mpcc_id) { struct mpc *mpc = dc->res_pool->mpc; - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) - get_hdr_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) - get_surface_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) - get_surface_tile_visual_confirm_color(pipe_ctx, color); - else - color_space_to_black_color( - dc, pipe_ctx->stream->output_color_space, color); - if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color)); - mpc->funcs->set_bg_color(mpc, color, mpcc_id); + memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color)); + mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id); } } @@ -2671,7 +2663,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); return; } @@ -2693,7 +2685,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) NULL, hubp->inst, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); ASSERT(new_mpcc != NULL); hubp->opp_id = pipe_ctx->stream_res.opp->inst; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 0ef7bf7ddb75..ef6d56da417c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -202,7 +202,6 @@ void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits); void dcn10_update_visual_confirm_color( struct dc *dc, struct pipe_ctx *pipe_ctx, - struct tg_color *color, int mpcc_id); #endif /* __DC_HWSS_DCN10_H__ */ diff --git
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index f49c1c0d6274..b3e187b1347d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2580,28 +2580,6 @@ void dcn20_reset_hw_ctx_wrap( } } -void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) -{ - struct mpc *mpc = dc->res_pool->mpc; - - // input to MPCC is always RGB, by default leave black_color at 0 - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) - get_hdr_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) - get_surface_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) - get_mpctree_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) - get_surface_tile_visual_confirm_color(pipe_ctx, color); - else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) - get_subvp_visual_confirm_color(dc, pipe_ctx, color); - - if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color)); - mpc->funcs->set_bg_color(mpc, color, mpcc_id); - } -} - void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2657,7 +2635,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) if (!pipe_ctx->plane_state->update_flags.bits.full_update && !pipe_ctx->update_flags.bits.mpcc) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); return; } @@ -2679,7 +2657,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) NULL, hubp->inst, mpcc_id); - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); ASSERT(new_mpcc != NULL); hubp->opp_id = pipe_ctx->stream_res.opp->inst; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 33a36c02b2f8..01901b08644c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -150,10 +150,5 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc, const struct tg_color *solid_color, int width, int height, int offset); -void dcn20_update_visual_confirm_color(struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct tg_color *color, - int mpcc_id); - #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 7c5817c426fa..4192c522e59a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -102,7 +102,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn20_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c index 1aeb04fbd89d..75472d53ff52 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -496,7 +496,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); return; } @@ -521,7 +521,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) dc->res_pool->mpc, mpcc_id); /* Call MPC to insert new plane */ - dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc, mpc_tree_params, &blnd_cfg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c index 9c16633e473a..92dd4cddbab8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c @@ -91,7 +91,7 @@ static const struct hw_sequencer_funcs dcn201_funcs = { .enable_dp_link_output = dce110_enable_dp_link_output, .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn201_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index fe1a8e2e08ef..8b58ce1db035 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .is_abm_supported = dcn21_is_abm_supported, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn21_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 3216d10c58ba..18e94d8ae54f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .disable_link_output = dce110_disable_link_output, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, .is_abm_supported = dcn21_is_abm_supported }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index 6192851c59ed..257df8660b4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -107,7 +107,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .optimize_pwr_state = 
dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 8598ea233ef3..ba9e7dee6e5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -110,7 +110,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn31_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index ed8a1b94c006..7a28c7bb25d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -112,7 +112,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn314_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 8085f2acb1a9..24a890d879b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -109,7 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = dcn32_enable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 8c60b88c7d1a..d8d8fcd5ef1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1324,6 +1324,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, int i, pipe_idx, active_hubp_count = 0; bool usr_retraining_support = false; bool unbounded_req_enabled = false; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; dc_assert_fp_enabled(); @@ -1405,6 +1406,11 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0) + context->res_ctx.pipe_ctx[i].has_vactive_margin = true; + else + 
context->res_ctx.pipe_ctx[i].has_vactive_margin = false; + /* MALL Allocation Sizes */ /* count from active, top pipes per plane only */ if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && @@ -2015,6 +2021,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; pstate_en = true; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank; } else { /* Restore FCLK latency and re-run validation to go back to original validation * output if we find that enabling FPO does not give us any benefit (i.e. lower diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 2eb597a24425..b4c1cc6dc857 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -426,6 +426,8 @@ struct pipe_ctx { struct dwbc *dwbc; struct mcif_wb *mcif_wb; union pipe_update_flags update_flags; + struct tg_color visual_confirm_color; + bool has_vactive_margin; }; /* Data used for dynamic link encoder assignment. diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 88ac723d10aa..df160c6a630c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -257,7 +257,6 @@ struct hw_sequencer_funcs { void (*update_visual_confirm_color)(struct dc *dc, struct pipe_ctx *pipe_ctx, - struct tg_color *color, int mpcc_id); void (*update_phantom_vp_position)(struct dc *dc, @@ -294,6 +293,7 @@ void get_surface_visual_confirm_color( void get_subvp_visual_confirm_color( struct dc *dc, + struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); @@ -306,4 +306,11 @@ void get_mpctree_visual_confirm_color( void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); + +void get_mclk_switch_visual_confirm_color( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + #endif /* __DC_HW_SEQUENCER_H__ */ From fe9fa3859b66caf4a6923598c8e343b8a32ec5d1 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 5 May 2023 11:06:26 -0400 Subject: [PATCH 234/861] drm/amd/display: Make unbounded req update separate from dlg/ttu [Description] - Updates to unbounded requesting should not be conditional on updates to dlg / ttu, as this could prevent unbounded requesting from being updated if dlg / ttu does not change Reviewed-by: Jun Lei Acked-by: Aurabindo Pillai Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 11 ++++++++--- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index b3e187b1347d..e74c3ce561ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1361,6 +1361,7 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx new_pipe->update_flags.bits.dppclk = 1; new_pipe->update_flags.bits.hubp_interdependent = 1; new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + 
new_pipe->update_flags.bits.unbounded_req = 1; new_pipe->update_flags.bits.gamut_remap = 1; new_pipe->update_flags.bits.scaler = 1; new_pipe->update_flags.bits.viewport = 1; @@ -1504,6 +1505,9 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs))) new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; } + + if (old_pipe->unbounded_req != new_pipe->unbounded_req) + new_pipe->update_flags.bits.unbounded_req = 1; } static void dcn20_update_dchubp_dpp( @@ -1537,10 +1541,11 @@ static void dcn20_update_dchubp_dpp( &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param); - - if (hubp->funcs->set_unbounded_requesting) - hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req); } + + if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting) + hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req); + if (pipe_ctx->update_flags.bits.hubp_interdependent) hubp->funcs->hubp_setup_interdependent( hubp, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index b4c1cc6dc857..d8dd143cf6ea 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -374,6 +374,7 @@ union pipe_update_flags { uint32_t viewport : 1; uint32_t plane_changed : 1; uint32_t det_size : 1; + uint32_t unbounded_req : 1; } bits; uint32_t raw; }; From 50a32b8cf4d7624eb8606b4cb7bc6dee37703da6 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 1 May 2023 16:30:54 -0600 Subject: [PATCH 235/861] drm/amd/display: Remove unnecessary variable There is no need for the local dc_version variable in dc_construct_ctx(), since its only use is to be copied into dc_ctx->dce_version. This commit removes the extra step.
Reviewed-by: Alex Hung Acked-by: Aurabindo Pillai Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2c639cd346d7..e664a77d05eb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -857,7 +857,6 @@ static bool dc_construct_ctx(struct dc *dc, const struct dc_init_data *init_params) { struct dc_context *dc_ctx; - enum dce_version dc_version = DCE_VERSION_UNKNOWN; dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); if (!dc_ctx) @@ -875,8 +874,7 @@ static bool dc_construct_ctx(struct dc *dc, /* Create logger */ - dc_version = resource_parse_asic_id(init_params->asic_id); - dc_ctx->dce_version = dc_version; + dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id); dc_ctx->perf_trace = dc_perf_trace_create(); if (!dc_ctx->perf_trace) { From 3a31e8b89b7240d9a17ace8a1ed050bdcb560f9e Mon Sep 17 00:00:00 2001 From: Daniel Miess Date: Tue, 25 Apr 2023 14:02:02 -0400 Subject: [PATCH 236/861] drm/amd/display: Remove v_startup workaround for dcn3+ [Why] Calls to dcn20_adjust_freesync_v_startup are no longer needed as of dcn3+ and can cause underflow in some cases [How] Move calls to dcn20_adjust_freesync_v_startup up into validate_bandwidth for dcn2.x Reviewed-by: Jun Lei Acked-by: Aurabindo Pillai Signed-off-by: Daniel Miess Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 3407f9a2c6a1..8ae5ddbd1b27 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1099,10 +1099,6 @@ void dcn20_calculate_dlg_params(struct dc *dc, context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; - if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) - dcn20_adjust_freesync_v_startup( - &context->res_ctx.pipe_ctx[i].stream->timing, - &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); pipe_idx++; } @@ -1931,6 +1927,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; + int i = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger); @@ -1954,6 +1951,15 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) + dcn20_adjust_freesync_v_startup( + &context->res_ctx.pipe_ctx[i].stream->timing, + &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); + } + BW_VAL_TRACE_END_WATERMARKS(); goto validate_out; @@ -2226,6 +2232,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, int vlevel = 0; int 
pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; + int i = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger); @@ -2254,6 +2261,15 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) + dcn20_adjust_freesync_v_startup( + &context->res_ctx.pipe_ctx[i].stream->timing, + &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); + } + BW_VAL_TRACE_END_WATERMARKS(); goto validate_out; From 629b8ede8b93428b8d124d343b5fbb57ab64d5a8 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 8 May 2023 00:32:41 -0400 Subject: [PATCH 237/861] drm/amd/display: 3.2.236 Acked-by: Aurabindo Pillai Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 8be2e6d6d888..2dff1a5cf3b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.235" +#define DC_VER "3.2.236" #define MAX_SURFACES 3 #define MAX_PLANES 6 From f4caf5842652f08e024741ef6d423cb0c101d863 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 14 Sep 2022 16:35:50 +0800 Subject: [PATCH 238/861] drm/amdgpu: introduce vmhub definition for multi-partition cases (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v1: Each partition has its own gfxhub or mmhub. 
adjust the num of MAX_VMHUBS and the GFXHUB/MMHUB layout (Le) v2: re-design the AMDGPU_GFXHUB/AMDGPU_MMHUB layout (Le) v3: apply the gfxhub/mmhub layout to new IPs (Hawking) v4: fix up gmc11 (Alex) v5: rebase (Alex) Signed-off-by: Le Ma Acked-by: Christian König Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 13 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 +-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10 +-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 10 +-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 10 +-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 12 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c | 10 +-- drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c | 10 +-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 33 +++++---- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 26 ++++---- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 78 +++++++++++----------- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4 +- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 2 +- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10 +-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 10 +-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 10 +-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 10 +-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 10 +-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c | 10 +-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 10 +-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c | 10 +-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 12 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 10 +-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 4 +- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 8 +-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 2 +- 47 files changed, 204 insertions(+), 204 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index a46285841d17..f0a136d35279 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -736,7 +736,7 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); } else { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0); + amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 87e1a1a9f298..488b3bb6dcb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -315,7 +315,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->use_doorbell = true; 
ring->doorbell_index = adev->doorbell_index.kiq; ring->xcc_id = xcc_id; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); if (xcc_id >= 1) ring->doorbell_index = adev->doorbell_index.xcc1_kiq_start + xcc_id - 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 4e2531758866..0a4e5fcfec6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -670,7 +670,7 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + hub->ctx_distance * i; - tmp = (hub_type == AMDGPU_GFXHUB_0) ? + tmp = (hub_type == AMDGPU_GFXHUB(0)) ? RREG32_SOC15_IP(GC, reg) : RREG32_SOC15_IP(MMHUB, reg); @@ -679,7 +679,7 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, else tmp &= ~hub->vm_cntx_cntl_vm_fault; - (hub_type == AMDGPU_GFXHUB_0) ? + (hub_type == AMDGPU_GFXHUB(0)) ? WREG32_SOC15_IP(GC, reg, tmp) : WREG32_SOC15_IP(MMHUB, reg, tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b6bd667df676..c3964c14f215 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2374,12 +2374,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, - AMDGPU_GFXHUB_0); + AMDGPU_GFXHUB(0)); if (r) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: - amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); + amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB(0)); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 6f085f0b4ef3..9f5d32b0fda1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -111,11 +111,14 @@ struct amdgpu_mem_stats; /* Reserve 4MB VRAM for page tables */ #define AMDGPU_VM_RESERVED_VRAM (8ULL << 20) -/* max number of VMHUB */ -#define AMDGPU_MAX_VMHUBS 3 -#define AMDGPU_GFXHUB_0 0 -#define AMDGPU_MMHUB_0 1 -#define AMDGPU_MMHUB_1 2 +/* + * max number of VMHUB + * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1 + */ +#define AMDGPU_MAX_VMHUBS 13 +#define AMDGPU_GFXHUB(x) (x) +#define AMDGPU_MMHUB0(x) (8 + x) +#define AMDGPU_MMHUB1(x) (8 + 4 + x) /* Reserve 2MB at top/bottom of address space for kernel use */ #define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 8e86b2c23c0a..7b585141e10e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4461,7 +4461,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; else ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; @@ -4490,7 +4490,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX10_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = 
AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -4978,7 +4978,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { nv_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f77779c31043..790df2cc3480 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -906,7 +906,7 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; else ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; @@ -937,7 +937,7 @@ static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX11_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -1707,7 +1707,7 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { soc21_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); @@ -4190,7 +4190,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) false : true; adev->gfxhub.funcs->set_fault_enable_default(adev, value); - amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); + amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 46577b59cb04..91814dc083c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2005,7 +2005,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -2105,7 +2105,7 @@ static int gfx_v9_0_sw_init(void *handle) /* disable scheduler on the real ring */ ring->no_scheduler = true; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -2123,7 +2123,7 @@ static int gfx_v9_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 
1; ring->is_sw_ring = true; hw_prio = amdgpu_sw_ring_priority(i); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio, NULL); @@ -2393,7 +2393,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { soc15_grbm_select(adev, 0, 0, 0, i, 0); /* CP and shaders */ if (i == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index d648a29c33e0..ec7c049c5952 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1935,7 +1935,7 @@ static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev) u32 status = 0; struct amdgpu_vmhub *hub; - hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; status = RREG32(hub->vm_l2_pro_fault_status); /* reset page fault status */ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 9d17dcfae130..f5104b982633 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -757,7 +757,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d.%d", ring->xcc_id, ring->me, ring->pipe, ring->queue); @@ -996,7 +996,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { for (j = 0; j < adev->gfx.num_xcd; j++) { soc15_grbm_select(adev, 0, 0, 0, i, j); /* CP and shaders */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ab2325f6c7ac..d94cc1ec7242 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -40,7 +40,7 @@ static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -247,7 +247,7 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -307,7 +307,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ 
-338,7 +338,7 @@ static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -411,7 +411,7 @@ static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 79af32bb078c..9c385ce3a8c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -42,7 +42,7 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; for (i = 0; i < adev->gfx.num_xcd; i++) { @@ -291,7 +291,7 @@ static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned num_level, block_size; uint32_t tmp; int i, j; @@ -357,7 +357,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i, j; for (j = 0; j < adev->gfx.num_xcd; j++) { @@ -406,7 +406,7 @@ static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i, j; @@ -483,7 +483,7 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_2_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 9b3a02527318..f173a61c6c15 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -120,7 +120,7 @@ static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -282,7 +282,7 @@ static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -331,7 +331,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) static void 
gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -360,7 +360,7 @@ static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -433,7 +433,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = { static void gfxhub_v2_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index 4aacbbec31e2..d8fc3e8088cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -123,7 +123,7 @@ static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -291,7 +291,7 @@ static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -340,7 +340,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -381,7 +381,7 @@ static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev) static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -462,7 +462,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = { static void gfxhub_v2_1_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, @@ -651,7 +651,7 @@ static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev) static void gfxhub_v2_1_halt(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; int time = 1000; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c index 13712640fa46..c53147f9c9fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c @@ -119,7 +119,7 @@ static u64 gfxhub_v3_0_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v3_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = 
&adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -290,7 +290,7 @@ static void gfxhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -339,7 +339,7 @@ static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v3_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -380,7 +380,7 @@ static int gfxhub_v3_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v3_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -463,7 +463,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v3_0_vmhub_funcs = { static void gfxhub_v3_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c index 6e0bd628c889..ae777487d72e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c @@ -122,7 +122,7 @@ static u64 gfxhub_v3_0_3_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v3_0_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -295,7 +295,7 @@ static void gfxhub_v3_0_3_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -344,7 +344,7 @@ static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -373,7 +373,7 @@ static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev) static void gfxhub_v3_0_3_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -451,7 +451,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v3_0_3_vmhub_funcs = { static void gfxhub_v3_0_3_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 5697b66bf0de..ea2a448147e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -76,7 +76,7 @@ 
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -84,11 +84,11 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -96,7 +96,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); break; default: break; @@ -149,7 +149,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if ((entry->vmid_src == AMDGPU_GFXHUB_0) && + if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32(hub->vm_l2_pro_fault_status); @@ -212,8 +212,7 @@ static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev) static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - return ((vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) && + return ((vmhub == AMDGPU_MMHUB0(0)) && (!amdgpu_sriov_vf(adev))); } @@ -249,7 +248,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int i; unsigned char hub_ip = 0; - hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); @@ -284,7 +283,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * Issue a dummy read to wait for the ACK register to be cleared * to avoid a false ACK due to the new fast GRBM interface. 
*/ - if ((vmhub == AMDGPU_GFXHUB_0) && + if ((vmhub == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, hub_ip); @@ -361,19 +360,19 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, mutex_lock(&adev->mman.gtt_window_lock); - if (vmhub == AMDGPU_MMHUB_0) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0); + if (vmhub == AMDGPU_MMHUB0(0)) { + gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB0(0), 0); mutex_unlock(&adev->mman.gtt_window_lock); return; } - BUG_ON(vmhub != AMDGPU_GFXHUB_0); + BUG_ON(vmhub != AMDGPU_GFXHUB(0)); if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || amdgpu_in_reset(adev) || ring->sched.ready == false) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); + gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB(0), 0); mutex_unlock(&adev->mman.gtt_window_lock); return; } @@ -466,7 +465,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v10_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } if (!adev->enable_mes) break; @@ -534,7 +533,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid if (ring->is_mes_queue) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -1075,9 +1074,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) if (!adev->in_s0ix) adev->gfxhub.funcs->set_fault_enable_default(adev, value); adev->mmhub.funcs->set_fault_enable_default(adev, value); - gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); + gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); if (!adev->in_s0ix) - gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); + gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 2f570fb5febe..fb2ac31cbba7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -64,7 +64,7 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -72,11 +72,11 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -84,7 +84,7 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. 
*/ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); break; default: break; @@ -110,7 +110,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if (entry->vmid_src == AMDGPU_GFXHUB_0) + if (entry->vmid_src == AMDGPU_GFXHUB(0)) RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); @@ -170,7 +170,7 @@ static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev) static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - return ((vmhub == AMDGPU_MMHUB_0) && + return ((vmhub == AMDGPU_MMHUB0(0)) && (!amdgpu_sriov_vf(adev))); } @@ -202,7 +202,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int i; unsigned char hub_ip = 0; - hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); @@ -251,7 +251,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, hub->eng_distance * eng, 0, hub_ip); /* Issue additional private vm invalidation to MMHUB */ - if ((vmhub != AMDGPU_GFXHUB_0) && + if ((vmhub != AMDGPU_GFXHUB(0)) && (hub->vm_l2_bank_select_reserved_cid2) && !amdgpu_sriov_vf(adev)) { inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2); @@ -284,7 +284,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { - if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron) + if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron) return; /* flush hdp cache */ @@ -369,7 +369,7 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v11_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } } } @@ -435,7 +435,7 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid if (ring->is_mes_queue) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid; @@ -886,7 +886,7 @@ static int gmc_v11_0_sw_fini(void *handle) static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32(hub->vm_contexts_disable, 0); return; @@ -921,7 +921,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) false : true; adev->mmhub.funcs->set_fault_enable_default(adev, value); - gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); + gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6ae5cee9b64b..193ba4d912a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -491,20 +491,20 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. 
*/ - if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) continue; - if (j == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP(GC, reg); - else + if (j >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP(MMHUB, reg); + else + tmp = RREG32_SOC15_IP(GC, reg); tmp &= ~bits; - if (j == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP(GC, reg, tmp); - else + if (j >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP(MMHUB, reg, tmp); + else + WREG32_SOC15_IP(GC, reg, tmp); } } break; @@ -519,20 +519,20 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. */ - if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) continue; - if (j == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP(GC, reg); - else + if (j >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP(MMHUB, reg); + else + tmp = RREG32_SOC15_IP(GC, reg); tmp |= bits; - if (j == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP(GC, reg, tmp); - else + if (j >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP(MMHUB, reg, tmp); + else + WREG32_SOC15_IP(GC, reg, tmp); } } break; @@ -605,13 +605,13 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (entry->client_id == SOC15_IH_CLIENTID_VMC) { hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB_0]; + hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { hub_name = "mmhub1"; - hub = &adev->vmhub[AMDGPU_MMHUB_1]; + hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; } else { hub_name = "gfxhub0"; - hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; } memset(&task_info, 0, sizeof(struct amdgpu_task_info)); @@ -636,7 +636,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if ((entry->vmid_src == AMDGPU_GFXHUB_0) && + if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) RREG32(hub->vm_l2_pro_fault_status); @@ -649,7 +649,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) { + if (hub == &adev->vmhub[AMDGPU_GFXHUB(0)]) { dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", cid >= ARRAY_SIZE(gfxhub_client_ids) ? 
"unknown" : gfxhub_client_ids[cid], @@ -759,8 +759,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) return false; - return ((vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) && + return ((vmhub == AMDGPU_MMHUB0(0) || + vmhub == AMDGPU_MMHUB1(0)) && (!amdgpu_sriov_vf(adev)) && (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && (adev->apu_flags & AMD_APU_IS_PICASSO)))); @@ -849,11 +849,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (j = 0; j < adev->usec_timeout; j++) { /* a read return value of 1 means semaphore acquire */ - if (vmhub == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); - else + if (vmhub >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng); - + else + tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); if (tmp & 0x1) break; udelay(1); @@ -864,27 +863,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, do { - if (vmhub == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); - else + if (vmhub >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + else + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); /* * Issue a dummy read to wait for the ACK register to * be cleared to avoid a false ACK due to the new fast * GRBM interface. */ - if ((vmhub == AMDGPU_GFXHUB_0) && + if ((vmhub == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng); for (j = 0; j < adev->usec_timeout; j++) { - if (vmhub == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); - else + if (vmhub >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng); - + else + tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); if (tmp & (1 << vmid)) break; udelay(1); @@ -900,10 +898,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - if (vmhub == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + if (vmhub >= AMDGPU_MMHUB0(0)) + WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); else - WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); } spin_unlock(&adev->gmc.invalidate_lock); @@ -994,7 +992,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v9_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } break; } @@ -1060,10 +1058,10 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, uint32_t reg; /* Do nothing because there's no lut register for mmhub1. 
*/ - if (ring->vm_hub == AMDGPU_MMHUB_1) + if (ring->vm_hub == AMDGPU_MMHUB1(0)) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -1947,7 +1945,7 @@ static int gmc_v9_0_hw_init(void *handle) adev->mmhub.funcs->set_fault_enable_default(adev, value); } for (i = 0; i < adev->num_vmhubs; ++i) { - if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0))) continue; gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index a3076eb8af6a..71fe7f6f9889 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -485,7 +485,7 @@ int jpeg_v1_0_sw_init(void *handle) return r; ring = &adev->jpeg.inst->ring_dec; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 0eddf7c824a7..3a43e42f4834 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -86,7 +86,7 @@ static int jpeg_v2_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index b040f51d9aa9..259b7ba6a842 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -128,9 +128,9 @@ static int jpeg_v2_5_sw_init(void *handle) ring = &adev->jpeg.inst[i].ring_dec; ring->use_doorbell = true; if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; sprintf(ring->name, "jpeg_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 1c2292cc5f2c..c55386c22311 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -101,7 +101,7 @@ static int jpeg_v3_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 77e1e64aa1d1..d7d5ffc29393 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -108,7 +108,7 @@ static int jpeg_v4_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = amdgpu_sriov_vf(adev) ? 
(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1); - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 4560476c7c31..f1a6abdad21b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -149,7 +149,7 @@ static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes, { struct amdgpu_device *adev = mes->adev; union MESAPI__ADD_QUEUE mes_add_queue_pkt; - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; uint32_t vm_cntx_cntl = hub->vm_cntx_cntl; memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 3adb450eec07..9791f3581786 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -164,7 +164,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes, { struct amdgpu_device *adev = mes->adev; union MESAPI__ADD_QUEUE mes_add_queue_pkt; - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; uint32_t vm_cntx_cntl = hub->vm_cntx_cntl; memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 15e7cbeae75b..fb91b31056ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -54,7 +54,7 @@ static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -229,7 +229,7 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -285,7 +285,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -338,7 +338,7 @@ static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -415,7 +415,7 @@ static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v1_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index 73afbf2facc9..9086f2fdfaf4 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -54,7 +54,7 @@ static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -261,7 +261,7 @@ static void mmhub_v1_7_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -319,7 +319,7 @@ static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -348,7 +348,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_7_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -425,7 +425,7 @@ static void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v1_7_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 342d1702104c..9ec06f9db761 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -53,7 +53,7 @@ static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -253,7 +253,7 @@ static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -311,7 +311,7 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -352,7 +352,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -426,7 +426,7 @@ static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool static 
void mmhub_v1_8_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 278e32db878d..8f76c6ecf50a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -187,7 +187,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -362,7 +362,7 @@ static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -412,7 +412,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -441,7 +441,7 @@ static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -520,7 +520,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = { static void mmhub_v2_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index fcf2813e70db..8bd0fc8d9d25 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -121,7 +121,7 @@ static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -280,7 +280,7 @@ static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -330,7 +330,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -373,7 +373,7 @@ static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev) static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = 
&adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -446,7 +446,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = { static void mmhub_v2_3_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index 17a792616979..441379e91cfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -136,7 +136,7 @@ mmhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v3_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -319,7 +319,7 @@ static void mmhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -369,7 +369,7 @@ static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -398,7 +398,7 @@ static int mmhub_v3_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -477,7 +477,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_vmhub_funcs = { static void mmhub_v3_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 26509b6b8c24..12c7f4b46ea9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -138,7 +138,7 @@ static void mmhub_v3_0_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -306,7 +306,7 @@ static void mmhub_v3_0_1_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -356,7 +356,7 @@ static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_1_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -385,7 
+385,7 @@ static int mmhub_v3_0_1_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_1_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -459,7 +459,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_1_vmhub_funcs = { static void mmhub_v3_0_1_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c index 26abbc6a47ab..5dadc85abf7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c @@ -129,7 +129,7 @@ mmhub_v3_0_2_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v3_0_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -311,7 +311,7 @@ static void mmhub_v3_0_2_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -361,7 +361,7 @@ static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_2_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -390,7 +390,7 @@ static int mmhub_v3_0_2_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_2_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -469,7 +469,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_2_vmhub_funcs = { static void mmhub_v3_0_2_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 72083e96222f..e790f890aec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -57,7 +57,7 @@ static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid, uint32_t vmid, uint64_t value) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -294,7 +294,7 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev, static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned int num_level, block_size; uint32_t tmp; int i; @@ -363,7 +363,7 @@ static void 
mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev, int hubid) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -404,7 +404,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i, j; @@ -507,8 +507,8 @@ static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v9_4_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = - {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]}; + struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = { + &adev->vmhub[AMDGPU_MMHUB0(0)], &adev->vmhub[AMDGPU_MMHUB1(0)]}; int i; for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 9295ac7edd56..50b6eb9bcfda 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1825,12 +1825,12 @@ static int sdma_v4_0_sw_init(void *handle) /* * On Arcturus, SDMA instance 5~7 has a different vmhub - * type(AMDGPU_MMHUB_1). + * type(AMDGPU_MMHUB1). */ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, @@ -1851,9 +1851,9 @@ static int sdma_v4_0_sw_init(void *handle) ring->doorbell_index += 0x400; if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 64dcaa2670dd..7efe7c43fffb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1309,7 +1309,7 @@ static int sdma_v4_4_2_sw_init(void *handle) /* doorbell size is 2 dwords, get DWORD offset */ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, @@ -1328,7 +1328,7 @@ static int sdma_v4_4_2_sw_init(void *handle) */ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; ring->doorbell_index += 0x400; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 92e1299be021..a0077cf41295 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1389,7 +1389,7 @@ static int sdma_v5_0_sw_init(void *handle) (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = 
amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 : diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index ca7e8757d78e..efa2c84ee78e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1253,7 +1253,7 @@ static int sdma_v5_2_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_INSTANCE0 + i, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 3d9a80511a45..79d09792d2ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1298,7 +1298,7 @@ static int sdma_v6_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index e32b656b3dab..abaa4463e906 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -444,7 +444,7 @@ static int uvd_v7_0_sw_init(void *handle) continue; if (!amdgpu_sriov_vf(adev)) { ring = &adev->uvd.inst[j].ring; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "uvd_%d", ring->me); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0, @@ -455,7 +455,7 @@ static int uvd_v7_0_sw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst[j].ring_enc[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 57b85bb6a1e4..e0b70cd3b697 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -466,7 +466,7 @@ static int vce_v4_0_sw_init(void *handle) enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); ring = &adev->vce.ring[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vce%d", i); if (amdgpu_sriov_vf(adev)) { /* DOORBELL only works under SRIOV */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 761c28fa6ec1..f877c39c7cdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -120,7 +120,7 @@ static int vcn_v1_0_sw_init(void *handle) return r; ring = &adev->vcn.inst->ring_dec; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -142,7 +142,7 @@ static int vcn_v1_0_sw_init(void *handle) enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); ring = &adev->vcn.inst->ring_enc[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, hw_prio, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 7c2b3aa48083..c975aed2f6c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -129,7 +129,7 @@ static int vcn_v2_0_sw_init(void *handle) ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -160,7 +160,7 @@ static int vcn_v2_0_sw_init(void *handle) ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); if (!amdgpu_sriov_vf(adev)) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; else diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index ab0b45d0ead1..7044bd7c9f62 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -188,9 +188,9 @@ static int vcn_v2_5_sw_init(void *handle) (amdgpu_sriov_vf(adev) ? 2*j : 8*j); if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec_%d", j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, @@ -208,9 +208,9 @@ static int vcn_v2_5_sw_init(void *handle) (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j)); if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc_%d.%d", j, i); r = amdgpu_ring_init(adev, ring, 512, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3eab186261aa..70fefbf26c48 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -189,7 +189,7 @@ static int vcn_v3_0_sw_init(void *handle) } else { ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i; } - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, AMDGPU_RING_PRIO_DEFAULT, @@ -213,7 +213,7 @@ static int vcn_v3_0_sw_init(void *handle) } else { ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i; } - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc_%d.%d", i, j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, hw_prio, &adev->vcn.inst[i].sched_score); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index bf0674039598..81446e6996df 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -149,7 +149,7 @@ static int vcn_v4_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1; else ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_unified_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, From b35ce49ab9ca2bc6a59d4441efc5039de80643ce Mon Sep 17 00:00:00 2001 From: Le Ma Date: Sun, 19 Dec 2021 11:03:59 +0800 Subject: [PATCH 239/861] 
drm/amdgpu: assign register address for vmhub object on each XCD MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each XCD has its own gfxhub. v2: switch to the new VMHUB layout v3: fix mistake Signed-off-by: Le Ma Reviewed-by: Christian König Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 56 ++++++++++++++---------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 9c385ce3a8c4..e5016fea1f28 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -483,33 +483,43 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_2_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + struct amdgpu_vmhub *hub; + int i; - hub->ctx0_ptb_addr_lo32 = - SOC15_REG_OFFSET(GC, 0, + for (i = 0; i < adev->gfx.num_xcd; i++) { + hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; + + hub->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(GC, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); - hub->ctx0_ptb_addr_hi32 = - SOC15_REG_OFFSET(GC, 0, + hub->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(GC, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); - hub->vm_inv_eng0_sem = - SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_SEM); - hub->vm_inv_eng0_req = - SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_REQ); - hub->vm_inv_eng0_ack = - SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ACK); - hub->vm_context0_cntl = - SOC15_REG_OFFSET(GC, 0, regVM_CONTEXT0_CNTL); - hub->vm_l2_pro_fault_status = - SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS); - hub->vm_l2_pro_fault_cntl = - SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, i, regVM_INVALIDATE_ENG0_SEM); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(GC, i, regVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(GC, i, regVM_INVALIDATE_ENG0_ACK); + hub->vm_context0_cntl = + SOC15_REG_OFFSET(GC, i, regVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(GC, i, + regVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(GC, i, regVM_L2_PROTECTION_FAULT_CNTL); - hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; - hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - - regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; - hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ; - hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - - regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + hub->ctx_distance = regVM_CONTEXT1_CNTL - + regVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = + regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - + regVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = + regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + } } From d9426c3d9b4e91dda4f1f1684f9296762fafe0de Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 20 Dec 2021 16:06:25 +0800 Subject: [PATCH 240/861] drm/amdgpu: add bitmask to iterate vmhubs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the layout of VMHUB definition has been changed to cover multiple XCD/AID case, the original num_vmhubs is not appropriate to do vmhub iteration any more. 
Drop num_vmhubs and introduce vmhubs_mask instead. v2: switch to the new VMHUB layout v3: use DECLARE_BITMAP to define vmhubs_mask Signed-off-by: Le Ma Reviewed-by: Christian König Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 21 ++++++++++++--------- 9 files changed, 25 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 957b18bda4a7..0f163d266812 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -829,7 +829,7 @@ struct amdgpu_device { dma_addr_t dummy_page_addr; struct amdgpu_vm_manager vm_manager; struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS]; - unsigned num_vmhubs; + DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS); /* memory management */ struct amdgpu_mman mman; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f0a136d35279..5afbcc390d89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -733,7 +733,7 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, if (adev->family == AMDGPU_FAMILY_AI) { int i; - for (i = 0; i < adev->num_vmhubs; i++) + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); } else { amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 01cb89ffbd56..6b12f4a75fc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -182,7 +182,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, } mb(); amdgpu_device_flush_hdp(adev, NULL); - for (i = 0; i < adev->num_vmhubs; i++) + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); drm_dev_exit(idx); @@ -264,7 +264,7 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev) mb(); amdgpu_device_flush_hdp(adev, NULL); - for (i = 0; i < adev->num_vmhubs; i++) + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index ea2a448147e3..ff96f11c2adf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -460,7 +460,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, &queried_pasid); if (ret && queried_pasid == pasid) { if (all_hub) { - for (i = 0; i < adev->num_vmhubs; i++) + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) gmc_v10_0_flush_gpu_tlb(adev, vmid, i, flush_type); } else { @@ -928,7 +928,8 @@ static int gmc_v10_0_sw_init(void *handle) case IP_VERSION(10, 3, 6): case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 7): - adev->num_vmhubs = 2; + set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); + set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); /* * To fulfill 4-level page support, * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12, diff --git 
a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index fb2ac31cbba7..3453f1c0e066 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -364,7 +364,7 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, &queried_pasid); if (ret && queried_pasid == pasid) { if (all_hub) { - for (i = 0; i < adev->num_vmhubs; i++) + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) gmc_v11_0_flush_gpu_tlb(adev, vmid, i, flush_type); } else { @@ -779,7 +779,8 @@ static int gmc_v11_0_sw_init(void *handle) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): - adev->num_vmhubs = 2; + set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); + set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); /* * To fulfill 4-level page support, * vm size is 256TB (48bit), maximum size, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index b7dad4e67813..aa754c95a0b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -808,7 +808,7 @@ static int gmc_v6_0_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->num_vmhubs = 1; + set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); if (adev->flags & AMD_IS_APU) { adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 402960b0174e..81609a2b226f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -977,7 +977,7 @@ static int gmc_v7_0_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->num_vmhubs = 1; + set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); if (adev->flags & AMD_IS_APU) { adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 504c1b34dab7..d48e33738a88 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1093,7 +1093,7 @@ static int gmc_v8_0_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->num_vmhubs = 1; + set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); if (adev->flags & AMD_IS_APU) { adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 193ba4d912a6..d4bfb5f8308a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -481,7 +481,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - for (j = 0; j < adev->num_vmhubs; j++) { + for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { hub = &adev->vmhub[j]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; @@ -509,7 +509,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, } break; case AMDGPU_IRQ_STATE_ENABLE: - for (j = 0; j < adev->num_vmhubs; j++) { + for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { hub = &adev->vmhub[j]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; @@ -803,7 +803,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, u32 j, inv_req, inv_req2, tmp; struct amdgpu_vmhub *hub; - BUG_ON(vmhub >= adev->num_vmhubs); + BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS); hub = &adev->vmhub[vmhub]; if (adev->gmc.xgmi.num_physical_nodes && @@ 
-987,7 +987,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, &queried_pasid); if (ret && queried_pasid == pasid) { if (all_hub) { - for (i = 0; i < adev->num_vmhubs; i++) + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) gmc_v9_0_flush_gpu_tlb(adev, vmid, i, flush_type); } else { @@ -1684,7 +1684,8 @@ static int gmc_v9_0_sw_init(void *handle) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 2): - adev->num_vmhubs = 2; + set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); + set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); @@ -1701,8 +1702,8 @@ static int gmc_v9_0_sw_init(void *handle) case IP_VERSION(9, 3, 0): case IP_VERSION(9, 4, 2): case IP_VERSION(9, 4, 3): - adev->num_vmhubs = 2; - + set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); + set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); /* * To fulfill 4-level page support, @@ -1718,7 +1719,9 @@ static int gmc_v9_0_sw_init(void *handle) adev->gmc.translate_further = adev->vm_manager.num_level > 1; break; case IP_VERSION(9, 4, 1): - adev->num_vmhubs = 3; + set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); + set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); + set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask); /* Keep the vm size same with Vega20 */ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); @@ -1944,7 +1947,7 @@ static int gmc_v9_0_hw_init(void *handle) adev->gfxhub.funcs->set_fault_enable_default(adev, value); adev->mmhub.funcs->set_fault_enable_default(adev, value); } - for (i = 0; i < adev->num_vmhubs; ++i) { + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0))) continue; gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); From ce8a12a532ed62d7037be91c5714243fdfa9f672 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 20 Dec 2021 16:42:20 +0800 Subject: [PATCH 241/861] drm/amdgpu: init vmhubs bitmask for GC 9.4.3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each XCD owns one GFXHUB. 
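As an illustrative aside (a hedged sketch, not part of this patch): once the bits below are set, the for_each_set_bit() iterator introduced by the previous patch visits one GFXHUB per XCD plus the single MMHUB, e.g.:

	/* flushes AMDGPU_GFXHUB(0..num_xcd-1) and AMDGPU_MMHUB0(0) */
	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);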
v2: switch to the new VMHUB layout Signed-off-by: Le Ma Acked-by: Christian König Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d4bfb5f8308a..6da85365e5aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1701,7 +1701,6 @@ static int gmc_v9_0_sw_init(void *handle) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 3, 0): case IP_VERSION(9, 4, 2): - case IP_VERSION(9, 4, 3): set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); @@ -1727,6 +1726,12 @@ static int gmc_v9_0_sw_init(void *handle) amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); adev->gmc.translate_further = adev->vm_manager.num_level > 1; break; + case IP_VERSION(9, 4, 3): + bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), adev->gfx.num_xcd); + bitmap_set(adev->vmhubs_mask, AMDGPU_MMHUB0(0), 1); + + amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); + break; default: break; } From 3566938b3491bb3aad701b487130f3efc363e2dc Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 20 Dec 2021 16:06:25 +0800 Subject: [PATCH 242/861] drm/amdgpu: assign different AMDGPU_GFXHUB for rings on each xcc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass the xcc_id to AMDGPU_GFXHUB(x). Signed-off-by: Le Ma Reviewed-by: Christian König Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 488b3bb6dcb1..e5ff0bf9f23b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -315,7 +315,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.kiq; ring->xcc_id = xcc_id; - ring->vm_hub = AMDGPU_GFXHUB(0); + ring->vm_hub = AMDGPU_GFXHUB(xcc_id); if (xcc_id >= 1) ring->doorbell_index = adev->doorbell_index.xcc1_kiq_start + xcc_id - 1; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index f5104b982633..064cd02451c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -757,7 +757,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB(0); + ring->vm_hub = AMDGPU_GFXHUB(xcc_id); sprintf(ring->name, "comp_%d.%d.%d.%d", ring->xcc_id, ring->me, ring->pipe, ring->queue); From 98a54e88e87f7291d4bbc6ec646c498f64ae042f Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 17 May 2022 22:20:10 +0800 Subject: [PATCH 243/861] drm/amdgpu: add sysfs node for compute partition mode Add current/available compute partition mode sysfs node.
v2: make the sysfs node an IP-independent one in amdgpu_gfx.c Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 132 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 6 + drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 3 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 67 +++++++++++ drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 32 +++++ 6 files changed, 241 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e94507a10e15..f432064a0535 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3634,6 +3634,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->srbm_mutex); mutex_init(&adev->gfx.pipe_reserve_mutex); mutex_init(&adev->gfx.gfx_off_mutex); + mutex_init(&adev->gfx.partition_mutex); mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index e5ff0bf9f23b..4a4d71ff9b95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1174,3 +1174,135 @@ bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id) return !(xcc_id % (adev->gfx.num_xcc_per_xcp ? adev->gfx.num_xcc_per_xcp : 1)); } + +static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, + struct device_attribute *addr, + char *buf) + { + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_gfx_partition mode; + char *partition_mode; + + mode = adev->gfx.funcs->query_partition_mode(adev); + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + partition_mode = "SPX"; + break; + case AMDGPU_DPX_PARTITION_MODE: + partition_mode = "DPX"; + break; + case AMDGPU_TPX_PARTITION_MODE: + partition_mode = "TPX"; + break; + case AMDGPU_QPX_PARTITION_MODE: + partition_mode = "QPX"; + break; + case AMDGPU_CPX_PARTITION_MODE: + partition_mode = "CPX"; + break; + default: + partition_mode = "UNKNOWN"; + break; + } + + return sysfs_emit(buf, "%s\n", partition_mode); +} + +static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, + struct device_attribute *addr, + const char *buf, size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_gfx_partition mode; + int ret; + + if (adev->gfx.num_xcd % 2 != 0) + return -EINVAL; + + if (!strncasecmp("SPX", buf, strlen("SPX"))) { + mode = AMDGPU_SPX_PARTITION_MODE; + } else if (!strncasecmp("DPX", buf, strlen("DPX"))) { + if (adev->gfx.num_xcd != 4 && adev->gfx.num_xcd != 8) + return -EINVAL; + mode = AMDGPU_DPX_PARTITION_MODE; + } else if (!strncasecmp("TPX", buf, strlen("TPX"))) { + if (adev->gfx.num_xcd != 6) + return -EINVAL; + mode = AMDGPU_TPX_PARTITION_MODE; + } else if (!strncasecmp("QPX", buf, strlen("QPX"))) { + if (adev->gfx.num_xcd != 8) + return -EINVAL; + mode = AMDGPU_QPX_PARTITION_MODE; + } else if (!strncasecmp("CPX", buf, strlen("CPX"))) { + mode = AMDGPU_CPX_PARTITION_MODE; + } else { + return -EINVAL; + } + + mutex_lock(&adev->gfx.partition_mutex); + + ret = adev->gfx.funcs->switch_partition_mode(adev, mode); + + mutex_unlock(&adev->gfx.partition_mutex); + + if (ret) + return ret; + + return count; +} + +static ssize_t amdgpu_gfx_get_available_compute_partition(struct device
*dev, + struct device_attribute *addr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + char *supported_partition; + + /* TBD */ + switch (adev->gfx.num_xcd) { + case 8: + supported_partition = "SPX, DPX, QPX, CPX"; + break; + case 6: + supported_partition = "SPX, TPX, CPX"; + break; + case 4: + supported_partition = "SPX, DPX, CPX"; + break; + /* this seems only existing in emulation phase */ + case 2: + supported_partition = "SPX, CPX"; + break; + default: + supported_partition = "Not supported"; + break; + } + + return sysfs_emit(buf, "%s\n", supported_partition); +} + +static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR, + amdgpu_gfx_get_current_compute_partition, + amdgpu_gfx_set_compute_partition); + +static DEVICE_ATTR(available_compute_partition, S_IRUGO, + amdgpu_gfx_get_available_compute_partition, NULL); + +int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) +{ + int r; + + r = device_create_file(adev->dev, &dev_attr_current_compute_partition); + if (r) + return r; + + r = device_create_file(adev->dev, &dev_attr_available_compute_partition); + if (r) + return r; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 2755f00ac19a..8df36527aee9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -259,6 +259,10 @@ struct amdgpu_gfx_funcs { void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); int (*get_gfx_shadow_info)(struct amdgpu_device *adev, struct amdgpu_gfx_shadow_info *shadow_info); + enum amdgpu_gfx_partition + (*query_partition_mode)(struct amdgpu_device *adev); + int (*switch_partition_mode)(struct amdgpu_device *adev, + enum amdgpu_gfx_partition mode); }; struct sq_work { @@ -394,6 +398,7 @@ struct amdgpu_gfx { enum amdgpu_gfx_partition partition_mode; uint32_t num_xcd; uint32_t num_xcc_per_xcp; + struct mutex partition_mutex; }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) @@ -478,4 +483,5 @@ int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index c686ff4bcc39..6a636c34b717 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -95,6 +95,9 @@ struct amdgpu_nbio_funcs { void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev); void (*clear_doorbell_interrupt)(struct amdgpu_device *adev); u32 (*get_rom_offset)(struct amdgpu_device *adev); + u32 (*get_compute_partition_mode)(struct amdgpu_device *adev); + void (*set_compute_partition_mode)(struct amdgpu_device *adev, + enum amdgpu_gfx_partition mode); }; struct amdgpu_nbio { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 064cd02451c2..1d15db9423c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -662,6 +662,67 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, soc15_grbm_select(adev, me, pipe, q, vm, 0); } +static enum amdgpu_gfx_partition +gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) +{ + enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + + if 
(adev->nbio.funcs->get_compute_partition_mode) + mode = adev->nbio.funcs->get_compute_partition_mode(adev); + + return mode; +} + +static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, + enum amdgpu_gfx_partition mode) +{ + u32 tmp = 0; + int num_xcc_per_partition, i; + + if (mode == adev->gfx.partition_mode) + return mode; + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_xcc_per_partition = adev->gfx.num_xcd; + break; + case AMDGPU_DPX_PARTITION_MODE: + num_xcc_per_partition = adev->gfx.num_xcd / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + num_xcc_per_partition = adev->gfx.num_xcd / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + num_xcc_per_partition = adev->gfx.num_xcd / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + num_xcc_per_partition = 1; + break; + default: + return -EINVAL; + } + + /* TODO: + * Stop user queues and threads, and make sure GPU is empty of work. + */ + + for (i = 0; i < adev->gfx.num_xcd; i++) { + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, + num_xcc_per_partition); + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, + i % num_xcc_per_partition); + WREG32_SOC15(GC, i, regCP_HYP_XCP_CTL, tmp); + } + + if (adev->nbio.funcs->set_compute_partition_mode) + adev->nbio.funcs->set_compute_partition_mode(adev, mode); + + adev->gfx.num_xcc_per_xcp = num_xcc_per_partition; + adev->gfx.partition_mode = mode; + + return 0; +} + static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, .select_se_sh = &gfx_v9_4_3_select_se_sh, @@ -669,6 +730,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, + .query_partition_mode = &gfx_v9_4_3_query_compute_partition, + .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, }; static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) @@ -858,6 +921,10 @@ static int gfx_v9_4_3_sw_init(void *handle) if (r) return r; + r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index a331a59c49e3..bdb84a53f0e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -347,6 +347,36 @@ static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev, DOORBELL_INTERRUPT_DISABLE, enable ? 
0 : 1); } +static enum amdgpu_gfx_partition nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP); + + if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, SPX_SUPPORT)) + return AMDGPU_SPX_PARTITION_MODE; + else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, DPX_SUPPORT)) + return AMDGPU_DPX_PARTITION_MODE; + else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, TPX_SUPPORT)) + return AMDGPU_TPX_PARTITION_MODE; + else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, CPX_SUPPORT)) + return AMDGPU_CPX_PARTITION_MODE; + else + return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; +} + +static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev, + enum amdgpu_gfx_partition mode) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP); + tmp &= ~0x1f; + tmp |= 1 << mode; + + WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP, tmp); +} + const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset, @@ -366,4 +396,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_clockgating_state = nbio_v7_9_get_clockgating_state, .ih_control = nbio_v7_9_ih_control, .remap_hdp_registers = nbio_v7_9_remap_hdp_registers, + .get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode, + .set_compute_partition_mode = nbio_v7_9_set_compute_partition_mode, }; From 99951878b048e39278bb05d96831353eca23b75f Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Mon, 29 Nov 2021 20:44:05 +0800 Subject: [PATCH 244/861] drm/amdgpu: make the WREG32_SOC15_xx macros support multi GC To write regs on different GCDs, use the inst index.
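As a hedged usage sketch (not part of this patch; regCP_HYP_XCP_CTL and the loop are borrowed from the partition-switch code earlier in this series), passing the XCC index as the SOC15 instance argument steers each write to the matching GC die:

	/* program the same register on every XCC, one GC die at a time */
	for (i = 0; i < adev->gfx.num_xcd; i++)
		WREG32_SOC15(GC, i, regCP_HYP_XCP_CTL, tmp);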
Signed-off-by: Shiwu Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 9fefd403e14f..3f6dac856d49 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -157,10 +157,10 @@ do { \ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ if (amdgpu_sriov_fullaccess(adev)) { \ - uint32_t r2 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG2; \ - uint32_t r3 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG3; \ - uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_CNTL_BASE_IDX] + prefix##GRBM_GFX_CNTL; \ - uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_INDEX_BASE_IDX] + prefix##GRBM_GFX_INDEX; \ + uint32_t r2 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG2; \ + uint32_t r3 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG3; \ + uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][inst][prefix##GRBM_GFX_CNTL_BASE_IDX] + prefix##GRBM_GFX_CNTL; \ + uint32_t grbm_idx = adev->reg_offset[GC_HWIP][inst][prefix##GRBM_GFX_INDEX_BASE_IDX] + prefix##GRBM_GFX_INDEX; \ if (target_reg == grbm_cntl) \ WREG32(r2, value); \ else if (target_reg == grbm_idx) \ @@ -176,13 +176,13 @@ #define WREG32_SOC15_RLC(ip, inst, reg, value) \ do { \ - uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\ + uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \ } while (0) #define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \ do { \ - uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\ + uint32_t target_reg = adev->reg_offset[GC_HWIP][inst][reg##_BASE_IDX] + reg;\ WREG32_RLC_EX(prefix, target_reg, value); \ } while (0) From 0fa49d108386c201b5c2cce68066a9b8f66883a5 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Fri, 17 Dec 2021 11:27:53 +0800 Subject: [PATCH 245/861] drm/amdgpu: override partition mode through module parameter Add a module parameter to override the partition mode. 
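As a usage note (the value mapping is taken from the MODULE_PARM_DESC added below): loading the driver with "modprobe amdgpu user_partt_mode=4", for example, would request CPX mode, since 4 maps to AMDGPU_CPX_PARTITION_MODE.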
Signed-off-by: Shiwu Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 25 +++++++++++++++++++++++-- 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 0f163d266812..a277bdc86057 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -242,6 +242,8 @@ extern int amdgpu_num_kcq; extern int amdgpu_vcnfw_log; extern int amdgpu_sg_display; +extern uint amdgpu_user_partt_mode; + #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 45544ebe576e..9e9da2ac5c82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -193,6 +193,7 @@ int amdgpu_smartshift_bias; int amdgpu_use_xgmi_p2p = 1; int amdgpu_vcnfw_log; int amdgpu_sg_display = -1; /* auto */ +uint amdgpu_user_partt_mode; static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -950,6 +951,18 @@ MODULE_PARM_DESC(smu_pptable_id, "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)"); module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); +/** + * DOC: user_partt_mode (uint) + * Used to override the default SPX mode. + */ +MODULE_PARM_DESC(user_partt_mode, + "specify partition mode to be used (0 = AMDGPU_SPX_PARTITION_MODE(default value), \ + 1 = AMDGPU_DPX_PARTITION_MODE, \ + 2 = AMDGPU_TPX_PARTITION_MODE, \ + 3 = AMDGPU_QPX_PARTITION_MODE, \ + 4 = AMDGPU_CPX_PARTITION_MODE)"); +module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444); + /* These devices are not supported by amdgpu. * They are supported by the mach64, r128, radeon drivers */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 1d15db9423c9..2676a185c232 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2162,8 +2162,29 @@ static int gfx_v9_4_3_early_init(void *handle) /* hardcode in emulation phase */ adev->gfx.num_xcd = 1; - adev->gfx.num_xcc_per_xcp = 1; - adev->gfx.partition_mode = AMDGPU_SPX_PARTITION_MODE; + + adev->gfx.partition_mode = amdgpu_user_partt_mode; + /* calculate the num_xcc_per_xcp for the partition mode */ + switch (amdgpu_user_partt_mode) { + case AMDGPU_SPX_PARTITION_MODE: + adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd; + break; + case AMDGPU_DPX_PARTITION_MODE: + adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + adev->gfx.num_xcc_per_xcp = 1; + break; + default: + adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd; + break; + } adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); From 62e790879efbf09edb9f262d5eb7765aeaf89809 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Thu, 23 Dec 2021 13:44:48 +0800 Subject: [PATCH 246/861] drm/amdgpu: alloc vm inv engines for every vmhub There are at most AMDGPU_MAX_VMHUBS vmhubs, and the vm_inv_engs need to be initialized for all of them.
In this way, the below error can be ruled out. [ 217.317752] amdgpu 0000:02:00.0: amdgpu: no VM inv eng for ring sdma0 Signed-off-by: Shiwu Zhang Reviewed-by: Christian Koenig Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 0a4e5fcfec6b..b8825a0670a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -534,22 +534,21 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) * subject to change when ring number changes * Engine 17: Gart flushes */ -#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 -#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 +#define AMDGPU_VMHUB_INV_ENG_BITMAP 0x1FFF3 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = - {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP, - GFXHUB_FREE_VM_INV_ENGS_BITMAP}; + unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0}; unsigned i; unsigned vmhub, inv_eng; - if (adev->enable_mes) { + /* init the vm inv eng for all vmhubs */ + for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { + vm_inv_engs[i] = AMDGPU_VMHUB_INV_ENG_BITMAP; /* reserve engine 5 for firmware */ - for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++) - vm_inv_engs[vmhub] &= ~(1 << 5); + if (adev->enable_mes) + vm_inv_engs[i] &= ~(1 << 5); } for (i = 0; i < adev->num_rings; ++i) { From f544afac3f34124088b981c63843a3cc48f4ee3e Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Tue, 17 May 2022 23:41:01 +0800 Subject: [PATCH 247/861] drm/amdgpu: Add kgd2kfd for GC 9.4.3 New GC (v9.4.3) and ATHUB (v1.8.0) versions are used. Add kgd_gfx_v9_4_3_* functions if registers in use of kgd_gfx_v9_* functions are changed or have different offset. Signed-off-by: Amber Lin Acked-by: Felix Kuehling Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 1 + .../drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 183 ++++++++++++++++++ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 30 +-- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 5 + drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 +- 5 files changed, 206 insertions(+), 16 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 415a7fa395c4..7fb09000efc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -228,6 +228,7 @@ amdgpu-y += \ amdgpu_amdkfd_gfx_v9.o \ amdgpu_amdkfd_arcturus.o \ amdgpu_amdkfd_aldebaran.o \ + amdgpu_amdkfd_gc_9_4_3.o \ amdgpu_amdkfd_gfx_v10.o \ amdgpu_amdkfd_gfx_v10_3.o \ amdgpu_amdkfd_gfx_v11.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c new file mode 100644 index 000000000000..562e1a04160f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -0,0 +1,183 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "amdgpu.h" +#include "amdgpu_amdkfd.h" +#include "amdgpu_amdkfd_arcturus.h" +#include "amdgpu_amdkfd_gfx_v9.h" +#include "gc/gc_9_4_3_offset.h" +#include "gc/gc_9_4_3_sh_mask.h" +#include "athub/athub_1_8_0_offset.h" +#include "athub/athub_1_8_0_sh_mask.h" +#include "oss/osssys_4_0_offset.h" +#include "oss/osssys_4_0_sh_mask.h" +#include "v9_structs.h" +#include "soc15.h" + +static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, + u32 pasid, unsigned int vmid) +{ + unsigned long timeout; + + /* + * We have to assume that there is no outstanding mapping. + * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because + * a mapping is in progress or because a mapping finished + * and the SW cleared it. + * So the protocol is to always wait & clear. + */ + uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid | + ATC_VMID0_PASID_MAPPING__VALID_MASK; + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, + regATC_VMID0_PASID_MAPPING) + vmid, pasid_mapping); + + timeout = jiffies + msecs_to_jiffies(10); + while (!(RREG32(SOC15_REG_OFFSET(ATHUB, 0, + regATC_VMID_PASID_MAPPING_UPDATE_STATUS)) & + (1U << vmid))) { + if (time_after(jiffies, timeout)) { + pr_err("Fail to program VMID-PASID mapping\n"); + return -ETIME; + } + cpu_relax(); + } + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, + regATC_VMID_PASID_MAPPING_UPDATE_STATUS), + 1U << vmid); + + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, + pasid_mapping); + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid, + pasid_mapping); + + return 0; +} + +static inline struct v9_mqd *get_mqd(void *mqd) +{ + return (struct v9_mqd *)mqd; +} + +static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) +{ + struct v9_mqd *m; + uint32_t *mqd_hqd; + uint32_t reg, hqd_base, hqd_end, data; + + m = get_mqd(mqd); + + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + + /* HQD registers extend to CP_HQD_AQL_DISPATCH_ID_HI */ + mqd_hqd = &m->cp_mqd_base_addr_lo; + hqd_base = SOC15_REG_OFFSET(GC, 0, regCP_MQD_BASE_ADDR); + hqd_end = SOC15_REG_OFFSET(GC, 0, regCP_HQD_AQL_DISPATCH_ID_HI); + + for (reg = hqd_base; reg <= hqd_end; reg++) + WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); + + + /* Activate doorbell logic before triggering WPTR poll. 
*/ + data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, + CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), + data); + + if (wptr) { + /* Don't read wptr with get_user because the user + * context may not be accessible (if this function + * runs in a work queue). Instead trigger a one-shot + * polling read from memory in the CP. This assumes + * that wptr is GPU-accessible in the queue's VMID via + * ATC or SVM. WPTR==RPTR before starting the poll so + * the CP starts fetching new commands from the right + * place. + * + * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit + * tricky. Assume that the queue didn't overflow. The + * number of valid bits in the 32-bit RPTR depends on + * the queue size. The remaining bits are taken from + * the saved 64-bit WPTR. If the WPTR wrapped, add the + * queue size. + */ + uint32_t queue_size = + 2 << REG_GET_FIELD(m->cp_hqd_pq_control, + CP_HQD_PQ_CONTROL, QUEUE_SIZE); + uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1); + + if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr) + guessed_wptr += queue_size; + guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); + guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; + + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_LO), + lower_32_bits(guessed_wptr)); + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI), + upper_32_bits(guessed_wptr)); + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), + lower_32_bits((uintptr_t)wptr)); + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, + regCP_HQD_PQ_WPTR_POLL_ADDR_HI), + upper_32_bits((uintptr_t)wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, regCP_PQ_WPTR_POLL_CNTL1), + (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, + queue_id)); + } + + /* Start the EOP fetcher */ + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_EOP_RPTR), + REG_SET_FIELD(m->cp_hqd_eop_rptr, + CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); + + data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); + WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE), data); + + kgd_gfx_v9_release_queue(adev); + + return 0; +} + +const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { + .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping, + .init_interrupts = kgd_gfx_v9_init_interrupts, + .hqd_load = kgd_gfx_v9_4_3_hqd_load, + .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, + .hqd_sdma_load = kgd_arcturus_hqd_sdma_load, + .hqd_dump = kgd_gfx_v9_hqd_dump, + .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump, + .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, + .hqd_destroy = kgd_gfx_v9_hqd_destroy, + .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, + .wave_control_execute = kgd_gfx_v9_wave_control_execute, + .get_atc_vmid_pasid_mapping_info = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, + .set_vm_context_page_table_base = + kgd_gfx_v9_set_vm_context_page_table_base, + .program_trap_handler_settings = + kgd_gfx_v9_program_trap_handler_settings +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index ae06d1f2af93..d36219ecd3dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -59,7 +59,7 @@ static void unlock_srbm(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, +void 
kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; @@ -68,7 +68,7 @@ static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, lock_srbm(adev, mec, pipe, queue_id, 0); } -static uint64_t get_queue_mask(struct amdgpu_device *adev, +uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe + @@ -77,7 +77,7 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -static void release_queue(struct amdgpu_device *adev) +void kgd_gfx_v9_release_queue(struct amdgpu_device *adev) { unlock_srbm(adev); } @@ -228,7 +228,7 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, m = get_mqd(mqd); - acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; @@ -280,7 +280,7 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, - (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); + (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ @@ -291,7 +291,7 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); - release_queue(adev); + kgd_gfx_v9_release_queue(adev); return 0; } @@ -307,7 +307,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, m = get_mqd(mqd); - acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -343,7 +343,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, out_unlock: spin_unlock(&adev->gfx.kiq[0].ring_lock); - release_queue(adev); + kgd_gfx_v9_release_queue(adev); return r; } @@ -365,13 +365,13 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, if (*dump == NULL) return -ENOMEM; - acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - release_queue(adev); + kgd_gfx_v9_release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -487,7 +487,7 @@ bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, bool retval = false; uint32_t low, high; - acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -497,7 +497,7 @@ bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(adev); + kgd_gfx_v9_release_queue(adev); return retval; } @@ -532,7 +532,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); @@ -561,13 +561,13 @@ int 
kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(adev); + kgd_gfx_v9_release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(adev); + kgd_gfx_v9_release_queue(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index c7ed3bc9053c..491273a02e30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -58,3 +58,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, int *pasid_wave_cnt, int *max_waves_per_cu); void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); +void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t queue_id); +uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id); +void kgd_gfx_v9_release_queue(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 00f528eb9812..1510041a6ee1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -51,6 +51,7 @@ extern const struct kfd2kgd_calls gfx_v8_kfd2kgd; extern const struct kfd2kgd_calls gfx_v9_kfd2kgd; extern const struct kfd2kgd_calls arcturus_kfd2kgd; extern const struct kfd2kgd_calls aldebaran_kfd2kgd; +extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; extern const struct kfd2kgd_calls gfx_v11_kfd2kgd; @@ -328,7 +329,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) break; case IP_VERSION(9, 4, 3): gfx_target_version = 90400; - f2g = &aldebaran_kfd2kgd; + f2g = &gc_9_4_3_kfd2kgd; break; /* Navi10 */ case IP_VERSION(10, 1, 10): From 9b4fd27601fbe7f77e7f8a8ca226211ef748844b Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 12 Jan 2022 19:42:56 +0530 Subject: [PATCH 248/861] drm/amdgpu: Use the correct API to read register Use SOC15 API so that the register offset is calculated correctly. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 2676a185c232..ad3e8cefbdb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1637,7 +1637,7 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) /* set static priority for a queue/ring */ gfx_v9_4_3_mqd_set_priority(ring, mqd); - mqd->cp_hqd_quantum = RREG32(regCP_HQD_QUANTUM); + mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, regCP_HQD_QUANTUM); /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. From 81283fee15ea6afb1e32defb369c3b96d2d6765c Mon Sep 17 00:00:00 2001 From: James Zhu Date: Wed, 19 Jan 2022 22:32:41 -0500 Subject: [PATCH 249/861] drm/amdgpu: add more macros to support offset variant Add more macros to support the offset variant and simplify the SOC15_WAIT_ON_RREG macro.
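A hedged usage sketch (the register, expected value, and mask are illustrative placeholders; the macro itself is reworked into a call to amdgpu_device_wait_on_rreg below): the caller polls until the masked readback matches the expected value, and gets -ETIMEDOUT otherwise:

	/* e.g. wait on VCN instance 0 until UVD_STATUS reads back 0x2 */
	r = SOC15_WAIT_ON_RREG(VCN, 0, regUVD_STATUS, 0x2, 0x2);
	if (r)
		return r;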
Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 28 +++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15_common.h | 36 +++++++--------------- 3 files changed, 42 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a277bdc86057..00c52caeebeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1082,6 +1082,9 @@ size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, void *buf, size_t size, bool write); +uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, + uint32_t inst, uint32_t reg_addr, char reg_name[], + uint32_t expected_value, uint32_t mask); uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags); void amdgpu_device_wreg(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f432064a0535..82a3d0ff6560 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -6081,3 +6081,31 @@ bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev) return true; } } + +uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, + uint32_t inst, uint32_t reg_addr, char reg_name[], + uint32_t expected_value, uint32_t mask) +{ + uint32_t ret = 0; + uint32_t old_ = 0; + uint32_t tmp_ = RREG32(reg_addr); + uint32_t loop = adev->usec_timeout; + + while ((tmp_ & (mask)) != (expected_value)) { + if (old_ != tmp_) { + loop = adev->usec_timeout; + old_ = tmp_; + } else + udelay(1); + tmp_ = RREG32(reg_addr); + loop--; + if (!loop) { + DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", + inst, reg_name, (uint32_t)expected_value, + (uint32_t)(tmp_ & (mask))); + ret = -ETIMEDOUT; + break; + } + } + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 3f6dac856d49..eb35096756b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -26,6 +26,8 @@ /* Register Access Macros */ #define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) +#define SOC15_REG_OFFSET1(ip, inst, reg, offset) \ + (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)+(offset)) #define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ?
\ @@ -86,31 +88,15 @@ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \ value, 0, ip##_HWIP) -#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \ -({ int ret = 0; \ - do { \ - uint32_t old_ = 0; \ - uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ - uint32_t loop = adev->usec_timeout; \ - ret = 0; \ - while ((tmp_ & (mask)) != (expected_value)) { \ - if (old_ != tmp_) { \ - loop = adev->usec_timeout; \ - old_ = tmp_; \ - } else \ - udelay(1); \ - tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ - loop--; \ - if (!loop) { \ - DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ - inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \ - ret = -ETIMEDOUT; \ - break; \ - } \ - } \ - } while (0); \ - ret; \ -}) +#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \ + amdgpu_device_wait_on_rreg(adev, inst, \ + (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)), \ + #reg, expected_value, mask) + +#define SOC15_WAIT_ON_RREG_OFFSET(ip, inst, reg, offset, expected_value, mask) \ + amdgpu_device_wait_on_rreg(adev, inst, \ + (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg) + (offset)), \ + #reg, expected_value, mask) #define WREG32_RLC(reg, value) \ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP) From 50b8b62ea496c615fb08ccc8a6fdf1891af75d7e Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 24 Apr 2022 15:38:16 +0800 Subject: [PATCH 250/861] drm/amdgpu: add vcn v4_0_3 ip headers Add vcn v4_0_3 register offset and shift masks header files v2: update headers (Alex) Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- .../include/asic_reg/vcn/vcn_4_0_3_offset.h | 2254 ++++ .../include/asic_reg/vcn/vcn_4_0_3_sh_mask.h | 10424 ++++++++++++++++ 2 files changed, 12678 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h new file mode 100644 index 000000000000..facad93cd06f --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h @@ -0,0 +1,2254 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ +#ifndef _vcn_4_0_3_OFFSET_HEADER +#define _vcn_4_0_3_OFFSET_HEADER + + + +// addressBlock: aid_uvd0_uvddec +// base address: 0x1fb00 +#define regUVD_TOP_CTRL 0x00c0 +#define regUVD_TOP_CTRL_BASE_IDX 1 +#define regUVD_CGC_GATE 0x00c1 +#define regUVD_CGC_GATE_BASE_IDX 1 +#define regUVD_CGC_CTRL 0x00c2 +#define regUVD_CGC_CTRL_BASE_IDX 1 +#define regAVM_SUVD_CGC_GATE 0x00c4 +#define regAVM_SUVD_CGC_GATE_BASE_IDX 1 +#define regCDEFE_SUVD_CGC_GATE 0x00c4 +#define regCDEFE_SUVD_CGC_GATE_BASE_IDX 1 +#define regEFC_SUVD_CGC_GATE 0x00c4 +#define regEFC_SUVD_CGC_GATE_BASE_IDX 1 +#define regENT_SUVD_CGC_GATE 0x00c4 +#define regENT_SUVD_CGC_GATE_BASE_IDX 1 +#define regIME_SUVD_CGC_GATE 0x00c4 +#define regIME_SUVD_CGC_GATE_BASE_IDX 1 +#define regPPU_SUVD_CGC_GATE 0x00c4 +#define regPPU_SUVD_CGC_GATE_BASE_IDX 1 +#define regSAOE_SUVD_CGC_GATE 0x00c4 +#define regSAOE_SUVD_CGC_GATE_BASE_IDX 1 +#define regSCM_SUVD_CGC_GATE 0x00c4 +#define regSCM_SUVD_CGC_GATE_BASE_IDX 1 +#define regSDB_SUVD_CGC_GATE 0x00c4 +#define regSDB_SUVD_CGC_GATE_BASE_IDX 1 +#define regSIT0_NXT_SUVD_CGC_GATE 0x00c4 +#define regSIT0_NXT_SUVD_CGC_GATE_BASE_IDX 1 +#define regSIT1_NXT_SUVD_CGC_GATE 0x00c4 +#define regSIT1_NXT_SUVD_CGC_GATE_BASE_IDX 1 +#define regSIT2_NXT_SUVD_CGC_GATE 0x00c4 +#define regSIT2_NXT_SUVD_CGC_GATE_BASE_IDX 1 +#define regSIT_SUVD_CGC_GATE 0x00c4 +#define regSIT_SUVD_CGC_GATE_BASE_IDX 1 +#define regSMPA_SUVD_CGC_GATE 0x00c4 +#define regSMPA_SUVD_CGC_GATE_BASE_IDX 1 +#define regSMP_SUVD_CGC_GATE 0x00c4 +#define regSMP_SUVD_CGC_GATE_BASE_IDX 1 +#define regSRE_SUVD_CGC_GATE 0x00c4 +#define regSRE_SUVD_CGC_GATE_BASE_IDX 1 +#define regUVD_MPBE0_SUVD_CGC_GATE 0x00c4 +#define regUVD_MPBE0_SUVD_CGC_GATE_BASE_IDX 1 +#define regUVD_MPBE1_SUVD_CGC_GATE 0x00c4 +#define regUVD_MPBE1_SUVD_CGC_GATE_BASE_IDX 1 +#define regUVD_SUVD_CGC_GATE 0x00c4 +#define regUVD_SUVD_CGC_GATE_BASE_IDX 1 +#define regAVM_SUVD_CGC_GATE2 0x00c5 +#define regAVM_SUVD_CGC_GATE2_BASE_IDX 1 +#define regCDEFE_SUVD_CGC_GATE2 0x00c5 +#define regCDEFE_SUVD_CGC_GATE2_BASE_IDX 1 +#define regDBR_SUVD_CGC_GATE2 0x00c5 +#define regDBR_SUVD_CGC_GATE2_BASE_IDX 1 +#define regENT_SUVD_CGC_GATE2 0x00c5 +#define regENT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regIME_SUVD_CGC_GATE2 0x00c5 +#define regIME_SUVD_CGC_GATE2_BASE_IDX 1 +#define regMPC1_SUVD_CGC_GATE2 0x00c5 +#define regMPC1_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSAOE_SUVD_CGC_GATE2 0x00c5 +#define regSAOE_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSDB_SUVD_CGC_GATE2 0x00c5 +#define regSDB_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSIT0_NXT_SUVD_CGC_GATE2 0x00c5 +#define regSIT0_NXT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSIT1_NXT_SUVD_CGC_GATE2 0x00c5 +#define regSIT1_NXT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSIT2_NXT_SUVD_CGC_GATE2 0x00c5 +#define regSIT2_NXT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSIT_SUVD_CGC_GATE2 0x00c5 +#define regSIT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSMPA_SUVD_CGC_GATE2 0x00c5 +#define regSMPA_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSMP_SUVD_CGC_GATE2 0x00c5 +#define regSMP_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSRE_SUVD_CGC_GATE2 0x00c5 +#define regSRE_SUVD_CGC_GATE2_BASE_IDX 1 +#define regUVD_MPBE0_SUVD_CGC_GATE2 0x00c5 +#define regUVD_MPBE0_SUVD_CGC_GATE2_BASE_IDX 1 +#define regUVD_MPBE1_SUVD_CGC_GATE2 0x00c5 +#define regUVD_MPBE1_SUVD_CGC_GATE2_BASE_IDX 1 +#define regUVD_SUVD_CGC_GATE2 0x00c5 +#define regUVD_SUVD_CGC_GATE2_BASE_IDX 1 +#define regAVM_SUVD_CGC_CTRL 0x00c6 +#define regAVM_SUVD_CGC_CTRL_BASE_IDX 1 +#define regCDEFE_SUVD_CGC_CTRL 0x00c6 +#define regCDEFE_SUVD_CGC_CTRL_BASE_IDX 1 
+#define regDBR_SUVD_CGC_CTRL 0x00c6 +#define regDBR_SUVD_CGC_CTRL_BASE_IDX 1 +#define regEFC_SUVD_CGC_CTRL 0x00c6 +#define regEFC_SUVD_CGC_CTRL_BASE_IDX 1 +#define regENT_SUVD_CGC_CTRL 0x00c6 +#define regENT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regIME_SUVD_CGC_CTRL 0x00c6 +#define regIME_SUVD_CGC_CTRL_BASE_IDX 1 +#define regMPC1_SUVD_CGC_CTRL 0x00c6 +#define regMPC1_SUVD_CGC_CTRL_BASE_IDX 1 +#define regPPU_SUVD_CGC_CTRL 0x00c6 +#define regPPU_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSAOE_SUVD_CGC_CTRL 0x00c6 +#define regSAOE_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSCM_SUVD_CGC_CTRL 0x00c6 +#define regSCM_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSDB_SUVD_CGC_CTRL 0x00c6 +#define regSDB_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSIT0_NXT_SUVD_CGC_CTRL 0x00c6 +#define regSIT0_NXT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSIT1_NXT_SUVD_CGC_CTRL 0x00c6 +#define regSIT1_NXT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSIT2_NXT_SUVD_CGC_CTRL 0x00c6 +#define regSIT2_NXT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSIT_SUVD_CGC_CTRL 0x00c6 +#define regSIT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSMPA_SUVD_CGC_CTRL 0x00c6 +#define regSMPA_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSMP_SUVD_CGC_CTRL 0x00c6 +#define regSMP_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSRE_SUVD_CGC_CTRL 0x00c6 +#define regSRE_SUVD_CGC_CTRL_BASE_IDX 1 +#define regUVD_MPBE0_SUVD_CGC_CTRL 0x00c6 +#define regUVD_MPBE0_SUVD_CGC_CTRL_BASE_IDX 1 +#define regUVD_MPBE1_SUVD_CGC_CTRL 0x00c6 +#define regUVD_MPBE1_SUVD_CGC_CTRL_BASE_IDX 1 +#define regUVD_SUVD_CGC_CTRL 0x00c6 +#define regUVD_SUVD_CGC_CTRL_BASE_IDX 1 +#define regUVD_CGC_CTRL3 0x00ca +#define regUVD_CGC_CTRL3_BASE_IDX 1 +#define regUVD_GPCOM_VCPU_DATA0 0x00d0 +#define regUVD_GPCOM_VCPU_DATA0_BASE_IDX 1 +#define regUVD_GPCOM_VCPU_DATA1 0x00d1 +#define regUVD_GPCOM_VCPU_DATA1_BASE_IDX 1 +#define regUVD_GPCOM_SYS_CMD 0x00d2 +#define regUVD_GPCOM_SYS_CMD_BASE_IDX 1 +#define regUVD_GPCOM_SYS_DATA0 0x00d3 +#define regUVD_GPCOM_SYS_DATA0_BASE_IDX 1 +#define regUVD_GPCOM_SYS_DATA1 0x00d4 +#define regUVD_GPCOM_SYS_DATA1_BASE_IDX 1 +#define regUVD_VCPU_INT_EN 0x00d5 +#define regUVD_VCPU_INT_EN_BASE_IDX 1 +#define regUVD_VCPU_INT_STATUS 0x00d6 +#define regUVD_VCPU_INT_STATUS_BASE_IDX 1 +#define regUVD_VCPU_INT_ACK 0x00d7 +#define regUVD_VCPU_INT_ACK_BASE_IDX 1 +#define regUVD_VCPU_INT_ROUTE 0x00d8 +#define regUVD_VCPU_INT_ROUTE_BASE_IDX 1 +#define regUVD_DRV_FW_MSG 0x00d9 +#define regUVD_DRV_FW_MSG_BASE_IDX 1 +#define regUVD_FW_DRV_MSG_ACK 0x00da +#define regUVD_FW_DRV_MSG_ACK_BASE_IDX 1 +#define regUVD_SUVD_INT_EN 0x00db +#define regUVD_SUVD_INT_EN_BASE_IDX 1 +#define regUVD_SUVD_INT_STATUS 0x00dc +#define regUVD_SUVD_INT_STATUS_BASE_IDX 1 +#define regUVD_SUVD_INT_ACK 0x00dd +#define regUVD_SUVD_INT_ACK_BASE_IDX 1 +#define regUVD_ENC_VCPU_INT_EN 0x00de +#define regUVD_ENC_VCPU_INT_EN_BASE_IDX 1 +#define regUVD_ENC_VCPU_INT_STATUS 0x00df +#define regUVD_ENC_VCPU_INT_STATUS_BASE_IDX 1 +#define regUVD_ENC_VCPU_INT_ACK 0x00e0 +#define regUVD_ENC_VCPU_INT_ACK_BASE_IDX 1 +#define regUVD_MASTINT_EN 0x00e1 +#define regUVD_MASTINT_EN_BASE_IDX 1 +#define regUVD_SYS_INT_EN 0x00e2 +#define regUVD_SYS_INT_EN_BASE_IDX 1 +#define regUVD_SYS_INT_STATUS 0x00e3 +#define regUVD_SYS_INT_STATUS_BASE_IDX 1 +#define regUVD_SYS_INT_ACK 0x00e4 +#define regUVD_SYS_INT_ACK_BASE_IDX 1 +#define regUVD_JOB_DONE 0x00e5 +#define regUVD_JOB_DONE_BASE_IDX 1 +#define regUVD_CBUF_ID 0x00e6 +#define regUVD_CBUF_ID_BASE_IDX 1 +#define regUVD_CONTEXT_ID 0x00e7 +#define regUVD_CONTEXT_ID_BASE_IDX 1 +#define regUVD_CONTEXT_ID2 0x00e8 +#define regUVD_CONTEXT_ID2_BASE_IDX 1 
+#define regUVD_NO_OP 0x00e9 +#define regUVD_NO_OP_BASE_IDX 1 +#define regUVD_RB_BASE_LO 0x00ea +#define regUVD_RB_BASE_LO_BASE_IDX 1 +#define regUVD_RB_BASE_HI 0x00eb +#define regUVD_RB_BASE_HI_BASE_IDX 1 +#define regUVD_RB_SIZE 0x00ec +#define regUVD_RB_SIZE_BASE_IDX 1 +#define regUVD_RB_BASE_LO2 0x00ef +#define regUVD_RB_BASE_LO2_BASE_IDX 1 +#define regUVD_RB_BASE_HI2 0x00f0 +#define regUVD_RB_BASE_HI2_BASE_IDX 1 +#define regUVD_RB_SIZE2 0x00f1 +#define regUVD_RB_SIZE2_BASE_IDX 1 +#define regUVD_RB_BASE_LO3 0x00f4 +#define regUVD_RB_BASE_LO3_BASE_IDX 1 +#define regUVD_RB_BASE_HI3 0x00f5 +#define regUVD_RB_BASE_HI3_BASE_IDX 1 +#define regUVD_RB_SIZE3 0x00f6 +#define regUVD_RB_SIZE3_BASE_IDX 1 +#define regUVD_RB_BASE_LO4 0x00f9 +#define regUVD_RB_BASE_LO4_BASE_IDX 1 +#define regUVD_RB_BASE_HI4 0x00fa +#define regUVD_RB_BASE_HI4_BASE_IDX 1 +#define regUVD_RB_SIZE4 0x00fb +#define regUVD_RB_SIZE4_BASE_IDX 1 +#define regUVD_OUT_RB_BASE_LO 0x00fe +#define regUVD_OUT_RB_BASE_LO_BASE_IDX 1 +#define regUVD_OUT_RB_BASE_HI 0x00ff +#define regUVD_OUT_RB_BASE_HI_BASE_IDX 1 +#define regUVD_OUT_RB_SIZE 0x0100 +#define regUVD_OUT_RB_SIZE_BASE_IDX 1 +#define regUVD_IOV_ACTIVE_FCN_ID 0x0103 +#define regUVD_IOV_ACTIVE_FCN_ID_BASE_IDX 1 +#define regUVD_IOV_MAILBOX 0x0104 +#define regUVD_IOV_MAILBOX_BASE_IDX 1 +#define regUVD_IOV_MAILBOX_RESP 0x0105 +#define regUVD_IOV_MAILBOX_RESP_BASE_IDX 1 +#define regUVD_RB_ARB_CTRL 0x0106 +#define regUVD_RB_ARB_CTRL_BASE_IDX 1 +#define regUVD_CTX_INDEX 0x0107 +#define regUVD_CTX_INDEX_BASE_IDX 1 +#define regUVD_CTX_DATA 0x0108 +#define regUVD_CTX_DATA_BASE_IDX 1 +#define regUVD_CXW_WR 0x0109 +#define regUVD_CXW_WR_BASE_IDX 1 +#define regUVD_CXW_WR_INT_ID 0x010a +#define regUVD_CXW_WR_INT_ID_BASE_IDX 1 +#define regUVD_CXW_WR_INT_CTX_ID 0x010b +#define regUVD_CXW_WR_INT_CTX_ID_BASE_IDX 1 +#define regUVD_CXW_INT_ID 0x010c +#define regUVD_CXW_INT_ID_BASE_IDX 1 +#define regUVD_MPEG2_ERROR 0x010d +#define regUVD_MPEG2_ERROR_BASE_IDX 1 +#define regUVD_YBASE 0x0110 +#define regUVD_YBASE_BASE_IDX 1 +#define regUVD_UVBASE 0x0111 +#define regUVD_UVBASE_BASE_IDX 1 +#define regUVD_PITCH 0x0112 +#define regUVD_PITCH_BASE_IDX 1 +#define regUVD_WIDTH 0x0113 +#define regUVD_WIDTH_BASE_IDX 1 +#define regUVD_HEIGHT 0x0114 +#define regUVD_HEIGHT_BASE_IDX 1 +#define regUVD_PICCOUNT 0x0115 +#define regUVD_PICCOUNT_BASE_IDX 1 +#define regUVD_MPRD_INITIAL_XY 0x0116 +#define regUVD_MPRD_INITIAL_XY_BASE_IDX 1 +#define regUVD_MPEG2_CTRL 0x0117 +#define regUVD_MPEG2_CTRL_BASE_IDX 1 +#define regUVD_MB_CTL_BUF_BASE 0x0118 +#define regUVD_MB_CTL_BUF_BASE_BASE_IDX 1 +#define regUVD_PIC_CTL_BUF_BASE 0x0119 +#define regUVD_PIC_CTL_BUF_BASE_BASE_IDX 1 +#define regUVD_DXVA_BUF_SIZE 0x011a +#define regUVD_DXVA_BUF_SIZE_BASE_IDX 1 +#define regUVD_SCRATCH_NP 0x011b +#define regUVD_SCRATCH_NP_BASE_IDX 1 +#define regUVD_CLK_SWT_HANDSHAKE 0x011c +#define regUVD_CLK_SWT_HANDSHAKE_BASE_IDX 1 +#define regUVD_GP_SCRATCH0 0x011e +#define regUVD_GP_SCRATCH0_BASE_IDX 1 +#define regUVD_GP_SCRATCH1 0x011f +#define regUVD_GP_SCRATCH1_BASE_IDX 1 +#define regUVD_GP_SCRATCH2 0x0120 +#define regUVD_GP_SCRATCH2_BASE_IDX 1 +#define regUVD_GP_SCRATCH3 0x0121 +#define regUVD_GP_SCRATCH3_BASE_IDX 1 +#define regUVD_GP_SCRATCH4 0x0122 +#define regUVD_GP_SCRATCH4_BASE_IDX 1 +#define regUVD_GP_SCRATCH5 0x0123 +#define regUVD_GP_SCRATCH5_BASE_IDX 1 +#define regUVD_GP_SCRATCH6 0x0124 +#define regUVD_GP_SCRATCH6_BASE_IDX 1 +#define regUVD_GP_SCRATCH7 0x0125 +#define regUVD_GP_SCRATCH7_BASE_IDX 1 +#define regUVD_GP_SCRATCH8 0x0126 
+#define regUVD_GP_SCRATCH8_BASE_IDX 1
+#define regUVD_GP_SCRATCH9 0x0127
+#define regUVD_GP_SCRATCH9_BASE_IDX 1
+#define regUVD_GP_SCRATCH10 0x0128
+#define regUVD_GP_SCRATCH10_BASE_IDX 1
+#define regUVD_GP_SCRATCH11 0x0129
+#define regUVD_GP_SCRATCH11_BASE_IDX 1
+#define regUVD_GP_SCRATCH12 0x012a
+#define regUVD_GP_SCRATCH12_BASE_IDX 1
+#define regUVD_GP_SCRATCH13 0x012b
+#define regUVD_GP_SCRATCH13_BASE_IDX 1
+#define regUVD_GP_SCRATCH14 0x012c
+#define regUVD_GP_SCRATCH14_BASE_IDX 1
+#define regUVD_GP_SCRATCH15 0x012d
+#define regUVD_GP_SCRATCH15_BASE_IDX 1
+#define regUVD_GP_SCRATCH16 0x012e
+#define regUVD_GP_SCRATCH16_BASE_IDX 1
+#define regUVD_GP_SCRATCH17 0x012f
+#define regUVD_GP_SCRATCH17_BASE_IDX 1
+#define regUVD_GP_SCRATCH18 0x0130
+#define regUVD_GP_SCRATCH18_BASE_IDX 1
+#define regUVD_GP_SCRATCH19 0x0131
+#define regUVD_GP_SCRATCH19_BASE_IDX 1
+#define regUVD_GP_SCRATCH20 0x0132
+#define regUVD_GP_SCRATCH20_BASE_IDX 1
+#define regUVD_GP_SCRATCH21 0x0133
+#define regUVD_GP_SCRATCH21_BASE_IDX 1
+#define regUVD_GP_SCRATCH22 0x0134
+#define regUVD_GP_SCRATCH22_BASE_IDX 1
+#define regUVD_GP_SCRATCH23 0x0135
+#define regUVD_GP_SCRATCH23_BASE_IDX 1
+#define regUVD_AUDIO_RB_BASE_LO 0x0136
+#define regUVD_AUDIO_RB_BASE_LO_BASE_IDX 1
+#define regUVD_AUDIO_RB_BASE_HI 0x0137
+#define regUVD_AUDIO_RB_BASE_HI_BASE_IDX 1
+#define regUVD_AUDIO_RB_SIZE 0x0138
+#define regUVD_AUDIO_RB_SIZE_BASE_IDX 1
+#define regUVD_VCPU_INT_STATUS2 0x013b
+#define regUVD_VCPU_INT_STATUS2_BASE_IDX 1
+#define regUVD_VCPU_INT_ACK2 0x013c
+#define regUVD_VCPU_INT_ACK2_BASE_IDX 1
+#define regUVD_VCPU_INT_EN2 0x013d
+#define regUVD_VCPU_INT_EN2_BASE_IDX 1
+#define regUVD_SUVD_CGC_STATUS2 0x013e
+#define regUVD_SUVD_CGC_STATUS2_BASE_IDX 1
+#define regUVD_SUVD_INT_STATUS2 0x0140
+#define regUVD_SUVD_INT_STATUS2_BASE_IDX 1
+#define regUVD_SUVD_INT_EN2 0x0141
+#define regUVD_SUVD_INT_EN2_BASE_IDX 1
+#define regUVD_SUVD_INT_ACK2 0x0142
+#define regUVD_SUVD_INT_ACK2_BASE_IDX 1
+#define regUVD_STATUS 0x0143
+#define regUVD_STATUS_BASE_IDX 1
+#define regUVD_ENC_PIPE_BUSY 0x0144
+#define regUVD_ENC_PIPE_BUSY_BASE_IDX 1
+#define regUVD_FW_POWER_STATUS 0x0145
+#define regUVD_FW_POWER_STATUS_BASE_IDX 1
+#define regUVD_CNTL 0x0146
+#define regUVD_CNTL_BASE_IDX 1
+#define regUVD_SOFT_RESET 0x0147
+#define regUVD_SOFT_RESET_BASE_IDX 1
+#define regUVD_SOFT_RESET2 0x0148
+#define regUVD_SOFT_RESET2_BASE_IDX 1
+#define regUVD_MMSCH_SOFT_RESET 0x0149
+#define regUVD_MMSCH_SOFT_RESET_BASE_IDX 1
+#define regUVD_WIG_CTRL 0x014a
+#define regUVD_WIG_CTRL_BASE_IDX 1
+#define regUVD_CGC_STATUS 0x014c
+#define regUVD_CGC_STATUS_BASE_IDX 1
+#define regUVD_CGC_UDEC_STATUS 0x014e
+#define regUVD_CGC_UDEC_STATUS_BASE_IDX 1
+#define regUVD_SUVD_CGC_STATUS 0x0150
+#define regUVD_SUVD_CGC_STATUS_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_CMD 0x0152
+#define regUVD_GPCOM_VCPU_CMD_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_ecpudec
+// base address: 0x1fe00
+#define regUVD_VCPU_CACHE_OFFSET0 0x0180
+#define regUVD_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE0 0x0181
+#define regUVD_VCPU_CACHE_SIZE0_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET1 0x0182
+#define regUVD_VCPU_CACHE_OFFSET1_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE1 0x0183
+#define regUVD_VCPU_CACHE_SIZE1_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET2 0x0184
+#define regUVD_VCPU_CACHE_OFFSET2_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE2 0x0185
+#define regUVD_VCPU_CACHE_SIZE2_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET3 0x0186
+#define regUVD_VCPU_CACHE_OFFSET3_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE3 0x0187
+#define regUVD_VCPU_CACHE_SIZE3_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET4 0x0188
+#define regUVD_VCPU_CACHE_OFFSET4_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE4 0x0189
+#define regUVD_VCPU_CACHE_SIZE4_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET5 0x018a
+#define regUVD_VCPU_CACHE_OFFSET5_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE5 0x018b
+#define regUVD_VCPU_CACHE_SIZE5_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET6 0x018c
+#define regUVD_VCPU_CACHE_OFFSET6_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE6 0x018d
+#define regUVD_VCPU_CACHE_SIZE6_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET7 0x018e
+#define regUVD_VCPU_CACHE_OFFSET7_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE7 0x018f
+#define regUVD_VCPU_CACHE_SIZE7_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET8 0x0190
+#define regUVD_VCPU_CACHE_OFFSET8_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE8 0x0191
+#define regUVD_VCPU_CACHE_SIZE8_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_OFFSET0 0x0192
+#define regUVD_VCPU_NONCACHE_OFFSET0_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_SIZE0 0x0193
+#define regUVD_VCPU_NONCACHE_SIZE0_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_OFFSET1 0x0194
+#define regUVD_VCPU_NONCACHE_OFFSET1_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_SIZE1 0x0195
+#define regUVD_VCPU_NONCACHE_SIZE1_BASE_IDX 1
+#define regUVD_VCPU_CNTL 0x0196
+#define regUVD_VCPU_CNTL_BASE_IDX 1
+#define regUVD_VCPU_PRID 0x0197
+#define regUVD_VCPU_PRID_BASE_IDX 1
+#define regUVD_VCPU_TRCE 0x0198
+#define regUVD_VCPU_TRCE_BASE_IDX 1
+#define regUVD_VCPU_TRCE_RD 0x0199
+#define regUVD_VCPU_TRCE_RD_BASE_IDX 1
+#define regUVD_VCPU_IND_INDEX 0x019b
+#define regUVD_VCPU_IND_INDEX_BASE_IDX 1
+#define regUVD_VCPU_IND_DATA 0x019c
+#define regUVD_VCPU_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_mpcdec
+// base address: 0x1ff30
+#define regUVD_MP_SWAP_CNTL 0x01cc
+#define regUVD_MP_SWAP_CNTL_BASE_IDX 1
+#define regUVD_MP_SWAP_CNTL2 0x01cd
+#define regUVD_MP_SWAP_CNTL2_BASE_IDX 1
+#define regUVD_MPC_LUMA_SRCH 0x01ce
+#define regUVD_MPC_LUMA_SRCH_BASE_IDX 1
+#define regUVD_MPC_LUMA_HIT 0x01cf
+#define regUVD_MPC_LUMA_HIT_BASE_IDX 1
+#define regUVD_MPC_LUMA_HITPEND 0x01d0
+#define regUVD_MPC_LUMA_HITPEND_BASE_IDX 1
+#define regUVD_MPC_CHROMA_SRCH 0x01d1
+#define regUVD_MPC_CHROMA_SRCH_BASE_IDX 1
+#define regUVD_MPC_CHROMA_HIT 0x01d2
+#define regUVD_MPC_CHROMA_HIT_BASE_IDX 1
+#define regUVD_MPC_CHROMA_HITPEND 0x01d3
+#define regUVD_MPC_CHROMA_HITPEND_BASE_IDX 1
+#define regUVD_MPC_CNTL 0x01d4
+#define regUVD_MPC_CNTL_BASE_IDX 1
+#define regUVD_MPC_PITCH 0x01d5
+#define regUVD_MPC_PITCH_BASE_IDX 1
+#define regUVD_MPC_SET_MUXA0 0x01d6
+#define regUVD_MPC_SET_MUXA0_BASE_IDX 1
+#define regUVD_MPC_SET_MUXA1 0x01d7
+#define regUVD_MPC_SET_MUXA1_BASE_IDX 1
+#define regUVD_MPC_SET_MUXB0 0x01d8
+#define regUVD_MPC_SET_MUXB0_BASE_IDX 1
+#define regUVD_MPC_SET_MUXB1 0x01d9
+#define regUVD_MPC_SET_MUXB1_BASE_IDX 1
+#define regUVD_MPC_SET_MUX 0x01da
+#define regUVD_MPC_SET_MUX_BASE_IDX 1
+#define regUVD_MPC_SET_ALU 0x01db
+#define regUVD_MPC_SET_ALU_BASE_IDX 1
+#define regUVD_MPC_PERF0 0x01dc
+#define regUVD_MPC_PERF0_BASE_IDX 1
+#define regUVD_MPC_PERF1 0x01dd
+#define regUVD_MPC_PERF1_BASE_IDX 1
+#define regUVD_MPC_IND_INDEX 0x01de
+#define regUVD_MPC_IND_INDEX_BASE_IDX 1
+#define regUVD_MPC_IND_DATA 0x01df
+#define regUVD_MPC_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_rbcdec
+// base address: 0x1ff90
+#define regUVD_RBC_IB_SIZE 0x01e4
+#define regUVD_RBC_IB_SIZE_BASE_IDX 1
+#define regUVD_RBC_IB_SIZE_UPDATE 0x01e5
+#define regUVD_RBC_IB_SIZE_UPDATE_BASE_IDX 1
+#define regUVD_RBC_RB_CNTL 0x01e6
+#define regUVD_RBC_RB_CNTL_BASE_IDX 1
+#define regUVD_RBC_RB_RPTR_ADDR 0x01e7
+#define regUVD_RBC_RB_RPTR_ADDR_BASE_IDX 1
+#define regUVD_RBC_VCPU_ACCESS 0x01ea
+#define regUVD_RBC_VCPU_ACCESS_BASE_IDX 1
+#define regUVD_FW_SEMAPHORE_CNTL 0x01eb
+#define regUVD_FW_SEMAPHORE_CNTL_BASE_IDX 1
+#define regUVD_RBC_READ_REQ_URGENT_CNTL 0x01ed
+#define regUVD_RBC_READ_REQ_URGENT_CNTL_BASE_IDX 1
+#define regUVD_RBC_RB_WPTR_CNTL 0x01ee
+#define regUVD_RBC_RB_WPTR_CNTL_BASE_IDX 1
+#define regUVD_RBC_WPTR_STATUS 0x01ef
+#define regUVD_RBC_WPTR_STATUS_BASE_IDX 1
+#define regUVD_RBC_WPTR_POLL_CNTL 0x01f0
+#define regUVD_RBC_WPTR_POLL_CNTL_BASE_IDX 1
+#define regUVD_RBC_WPTR_POLL_ADDR 0x01f1
+#define regUVD_RBC_WPTR_POLL_ADDR_BASE_IDX 1
+#define regUVD_SEMA_CMD 0x01f2
+#define regUVD_SEMA_CMD_BASE_IDX 1
+#define regUVD_SEMA_ADDR_LOW 0x01f3
+#define regUVD_SEMA_ADDR_LOW_BASE_IDX 1
+#define regUVD_SEMA_ADDR_HIGH 0x01f4
+#define regUVD_SEMA_ADDR_HIGH_BASE_IDX 1
+#define regUVD_ENGINE_CNTL 0x01f5
+#define regUVD_ENGINE_CNTL_BASE_IDX 1
+#define regUVD_SEMA_TIMEOUT_STATUS 0x01f6
+#define regUVD_SEMA_TIMEOUT_STATUS_BASE_IDX 1
+#define regUVD_SEMA_CNTL 0x01f7
+#define regUVD_SEMA_CNTL_BASE_IDX 1
+#define regUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0x01f8
+#define regUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1
+#define regUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0x01f9
+#define regUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL_BASE_IDX 1
+#define regUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0x01fa
+#define regUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1
+#define regUVD_JOB_START 0x01fb
+#define regUVD_JOB_START_BASE_IDX 1
+#define regUVD_RBC_BUF_STATUS 0x01fc
+#define regUVD_RBC_BUF_STATUS_BASE_IDX 1
+#define regUVD_RBC_SWAP_CNTL 0x01fd
+#define regUVD_RBC_SWAP_CNTL_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_lmi_adpdec
+// base address: 0x20090
+#define regUVD_LMI_RE_64BIT_BAR_LOW 0x0224
+#define regUVD_LMI_RE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RE_64BIT_BAR_HIGH 0x0225
+#define regUVD_LMI_RE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_IT_64BIT_BAR_LOW 0x0226
+#define regUVD_LMI_IT_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_IT_64BIT_BAR_HIGH 0x0227
+#define regUVD_LMI_IT_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MP_64BIT_BAR_LOW 0x0228
+#define regUVD_LMI_MP_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MP_64BIT_BAR_HIGH 0x0229
+#define regUVD_LMI_MP_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_CM_64BIT_BAR_LOW 0x022a
+#define regUVD_LMI_CM_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_CM_64BIT_BAR_HIGH 0x022b
+#define regUVD_LMI_CM_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_DB_64BIT_BAR_LOW 0x022c
+#define regUVD_LMI_DB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_DB_64BIT_BAR_HIGH 0x022d
+#define regUVD_LMI_DB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_DBW_64BIT_BAR_LOW 0x022e
+#define regUVD_LMI_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_DBW_64BIT_BAR_HIGH 0x022f
+#define regUVD_LMI_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_IDCT_64BIT_BAR_LOW 0x0230
+#define regUVD_LMI_IDCT_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_IDCT_64BIT_BAR_HIGH 0x0231
+#define regUVD_LMI_IDCT_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW 0x0232
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH 0x0233
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW 0x0234
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH 0x0235
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW 0x0236
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH 0x0237
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPC_64BIT_BAR_LOW 0x0238
+#define regUVD_LMI_MPC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPC_64BIT_BAR_HIGH 0x0239
+#define regUVD_LMI_MPC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x023a
+#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x023b
+#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x023c
+#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH 0x023d
+#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_LBSI_64BIT_BAR_LOW 0x023e
+#define regUVD_LMI_LBSI_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_LBSI_64BIT_BAR_HIGH 0x023f
+#define regUVD_LMI_LBSI_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW 0x0240
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH 0x0241
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW 0x0242
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH 0x0243
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0244
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0245
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_CENC_64BIT_BAR_LOW 0x0246
+#define regUVD_LMI_CENC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_CENC_64BIT_BAR_HIGH 0x0247
+#define regUVD_LMI_CENC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_SRE_64BIT_BAR_LOW 0x0248
+#define regUVD_LMI_SRE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_SRE_64BIT_BAR_HIGH 0x0249
+#define regUVD_LMI_SRE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW 0x024a
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH 0x024b
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW 0x024c
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH 0x024d
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW 0x024e
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH 0x024f
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_REF_64BIT_BAR_LOW 0x0250
+#define regUVD_LMI_MIF_REF_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_REF_64BIT_BAR_HIGH 0x0251
+#define regUVD_LMI_MIF_REF_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW 0x0252
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH 0x0253
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW 0x0254
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH 0x0255
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW 0x0256
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH 0x0257
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW 0x0258
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH 0x0259
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW 0x025a
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH 0x025b
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW 0x025c
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH 0x025d
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW 0x025e
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH 0x025f
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW 0x0260
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH 0x0261
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW 0x0262
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH 0x0263
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW 0x0264
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH 0x0265
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW 0x0266
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH 0x0267
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW 0x0270
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH 0x0271
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW 0x0272
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH 0x0273
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW 0x0274
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH 0x0275
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW 0x0276
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH 0x0277
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW 0x0278
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH 0x0279
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW 0x027a
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH 0x027b
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW 0x027c
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH 0x027d
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW 0x027e
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH 0x027f
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW 0x0280
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH 0x0281
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW 0x0282
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH 0x0283
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_SPH_64BIT_BAR_HIGH 0x0284
+#define regUVD_LMI_SPH_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW 0x0298
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH 0x0299
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW 0x029a
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH 0x029b
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW 0x029c
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH 0x029d
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW 0x029e
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH 0x029f
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_ADP_ATOMIC_CONFIG 0x02a1
+#define regUVD_ADP_ATOMIC_CONFIG_BASE_IDX 1
+#define regUVD_LMI_ARB_CTRL2 0x02a2
+#define regUVD_LMI_ARB_CTRL2_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI 0x02a7
+#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC_VMIDS_MULTI 0x02a8
+#define regUVD_LMI_VCPU_NC_VMIDS_MULTI_BASE_IDX 1
+#define regUVD_LMI_LAT_CTRL 0x02a9
+#define regUVD_LMI_LAT_CTRL_BASE_IDX 1
+#define regUVD_LMI_LAT_CNTR 0x02aa
+#define regUVD_LMI_LAT_CNTR_BASE_IDX 1
+#define regUVD_LMI_AVG_LAT_CNTR 0x02ab
+#define regUVD_LMI_AVG_LAT_CNTR_BASE_IDX 1
+#define regUVD_LMI_SPH 0x02ac
+#define regUVD_LMI_SPH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_VMID 0x02ad
+#define regUVD_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define regUVD_LMI_CTRL2 0x02ae
+#define regUVD_LMI_CTRL2_BASE_IDX 1
+#define regUVD_LMI_URGENT_CTRL 0x02af
+#define regUVD_LMI_URGENT_CTRL_BASE_IDX 1
+#define regUVD_LMI_CTRL 0x02b0
+#define regUVD_LMI_CTRL_BASE_IDX 1
+#define regUVD_LMI_STATUS 0x02b1
+#define regUVD_LMI_STATUS_BASE_IDX 1
+#define regUVD_LMI_PERFMON_CTRL 0x02b4
+#define regUVD_LMI_PERFMON_CTRL_BASE_IDX 1
+#define regUVD_LMI_PERFMON_COUNT_LO 0x02b5
+#define regUVD_LMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define regUVD_LMI_PERFMON_COUNT_HI 0x02b6
+#define regUVD_LMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define regUVD_LMI_ADP_SWAP_CNTL 0x02b7
+#define regUVD_LMI_ADP_SWAP_CNTL_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_VMID 0x02b8
+#define regUVD_LMI_RBC_RB_VMID_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_VMID 0x02b9
+#define regUVD_LMI_RBC_IB_VMID_BASE_IDX 1
+#define regUVD_LMI_MC_CREDITS 0x02ba
+#define regUVD_LMI_MC_CREDITS_BASE_IDX 1
+#define regUVD_LMI_ADP_IND_INDEX 0x02be
+#define regUVD_LMI_ADP_IND_INDEX_BASE_IDX 1
+#define regUVD_LMI_ADP_IND_DATA 0x02bf
+#define regUVD_LMI_ADP_IND_DATA_BASE_IDX 1
+#define regUVD_LMI_ADP_PF_EN 0x02c0
+#define regUVD_LMI_ADP_PF_EN_BASE_IDX 1
+#define regUVD_LMI_PREF_CTRL 0x02c2
+#define regUVD_LMI_PREF_CTRL_BASE_IDX 1
+#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW 0x02dd
+#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH 0x02de
+#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regVCN_RAS_CNTL 0x02df
+#define regVCN_RAS_CNTL_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jpeg0_jpegnpdec
+// base address: 0x20f00
+#define regUVD_JPEG_CNTL 0x05c0
+#define regUVD_JPEG_CNTL_BASE_IDX 1
+#define regUVD_JPEG_RB_BASE 0x05c1
+#define regUVD_JPEG_RB_BASE_BASE_IDX 1
+#define regUVD_JPEG_RB_WPTR 0x05c2
+#define regUVD_JPEG_RB_WPTR_BASE_IDX 1
+#define regUVD_JPEG_RB_RPTR 0x05c3
+#define regUVD_JPEG_RB_RPTR_BASE_IDX 1
+#define regUVD_JPEG_RB_SIZE 0x05c4
+#define regUVD_JPEG_RB_SIZE_BASE_IDX 1
+#define regUVD_JPEG_DEC_CNT 0x05c5
+#define regUVD_JPEG_DEC_CNT_BASE_IDX 1
+#define regUVD_JPEG_SPS_INFO 0x05c6
+#define regUVD_JPEG_SPS_INFO_BASE_IDX 1
+#define regUVD_JPEG_SPS1_INFO 0x05c7
+#define regUVD_JPEG_SPS1_INFO_BASE_IDX 1
+#define regUVD_JPEG_RE_TIMER 0x05c8
+#define regUVD_JPEG_RE_TIMER_BASE_IDX 1
+#define regUVD_JPEG_DEC_SCRATCH0 0x05c9
+#define regUVD_JPEG_DEC_SCRATCH0_BASE_IDX 1
+#define regUVD_JPEG_INT_EN 0x05ca
+#define regUVD_JPEG_INT_EN_BASE_IDX 1
+#define regUVD_JPEG_INT_STAT 0x05cb
+#define regUVD_JPEG_INT_STAT_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL0 0x05cc
+#define regUVD_JPEG_TIER_CNTL0_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL1 0x05cd
+#define regUVD_JPEG_TIER_CNTL1_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL2 0x05ce
+#define regUVD_JPEG_TIER_CNTL2_BASE_IDX 1
+#define regUVD_JPEG_TIER_STATUS 0x05cf
+#define regUVD_JPEG_TIER_STATUS_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_sclk0_jpegnpsclkdec
+// base address: 0x21000
+#define regUVD_JPEG_OUTBUF_CNTL 0x0600
+#define regUVD_JPEG_OUTBUF_CNTL_BASE_IDX 1
+#define regUVD_JPEG_OUTBUF_WPTR 0x0601
+#define regUVD_JPEG_OUTBUF_WPTR_BASE_IDX 1
+#define regUVD_JPEG_OUTBUF_RPTR 0x0602
+#define regUVD_JPEG_OUTBUF_RPTR_BASE_IDX 1
+#define regUVD_JPEG_PITCH 0x0603
+#define regUVD_JPEG_PITCH_BASE_IDX 1
+#define regUVD_JPEG_UV_PITCH 0x0604
+#define regUVD_JPEG_UV_PITCH_BASE_IDX 1
+#define regJPEG_DEC_Y_GFX8_TILING_SURFACE 0x0605
+#define regJPEG_DEC_Y_GFX8_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_UV_GFX8_TILING_SURFACE 0x0606
+#define regJPEG_DEC_UV_GFX8_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_GFX8_ADDR_CONFIG 0x0607
+#define regJPEG_DEC_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define regJPEG_DEC_Y_GFX10_TILING_SURFACE 0x0608
+#define regJPEG_DEC_Y_GFX10_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_UV_GFX10_TILING_SURFACE 0x0609
+#define regJPEG_DEC_UV_GFX10_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_GFX10_ADDR_CONFIG 0x060a
+#define regJPEG_DEC_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define regJPEG_DEC_ADDR_MODE 0x060b
+#define regJPEG_DEC_ADDR_MODE_BASE_IDX 1
+#define regUVD_JPEG_OUTPUT_XY 0x060c
+#define regUVD_JPEG_OUTPUT_XY_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_CMD 0x060d
+#define regUVD_JPEG_GPCOM_CMD_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_DATA0 0x060e
+#define regUVD_JPEG_GPCOM_DATA0_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_DATA1 0x060f
+#define regUVD_JPEG_GPCOM_DATA1_BASE_IDX 1
+#define regUVD_JPEG_SCRATCH1 0x0610
+#define regUVD_JPEG_SCRATCH1_BASE_IDX 1
+#define regUVD_JPEG_DEC_SOFT_RST 0x0611
+#define regUVD_JPEG_DEC_SOFT_RST_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jrbc0_uvd_jrbc_dec
+// base address: 0x21100
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_CNTL 0x0641
+#define regUVD_JRBC0_UVD_JRBC_RB_CNTL_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_SIZE 0x0642
+#define regUVD_JRBC0_UVD_JRBC_IB_SIZE_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_URGENT_CNTL 0x0643
+#define regUVD_JRBC0_UVD_JRBC_URGENT_CNTL_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_REF_DATA 0x0644
+#define regUVD_JRBC0_UVD_JRBC_RB_REF_DATA_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER 0x0645
+#define regUVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_SOFT_RESET 0x0648
+#define regUVD_JRBC0_UVD_JRBC_SOFT_RESET_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
+#define regUVD_JRBC0_UVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR 0x064a
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_BUF_STATUS 0x064b
+#define regUVD_JRBC0_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_BUF_STATUS 0x064c
+#define regUVD_JRBC0_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE 0x064d
+#define regUVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER 0x064e
+#define regUVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_REF_DATA 0x064f
+#define regUVD_JRBC0_UVD_JRBC_IB_REF_DATA_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_CMD 0x0650
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_CMD_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0651
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0652
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_SIZE 0x0653
+#define regUVD_JRBC0_UVD_JRBC_RB_SIZE_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0 0x0654
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jmi0_uvd_jmi_dec
+// base address: 0x21180
+#define regUVD_JMI0_UVD_JPEG_DEC_PF_CTRL 0x0660
+#define regUVD_JMI0_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_CTRL 0x0661
+#define regUVD_JMI0_UVD_LMI_JRBC_CTRL_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_CTRL 0x0662
+#define regUVD_JMI0_UVD_LMI_JPEG_CTRL_BASE_IDX 1
+#define regUVD_JMI0_JPEG_LMI_DROP 0x0663
+#define regUVD_JMI0_JPEG_LMI_DROP_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_VMID 0x0664
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_VMID_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_VMID 0x0665
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_VMID_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_VMID 0x0666
+#define regUVD_JMI0_UVD_LMI_JPEG_VMID_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0667
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0668
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0669
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x066a
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x066b
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x066c
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID 0x066d
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_DEC_SWAP_CNTL 0x066e
+#define regUVD_JMI0_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL 0x066f
+#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0670
+#define regUVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0671
+#define regUVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0672
+#define regUVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0673
+#define regUVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0674
+#define regUVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0675
+#define regUVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0676
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0677
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0678
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0679
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL2 0x067d
+#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jmi_common_dec
+// base address: 0x21300
+#define regUVD_JADP_MCIF_URGENT_CTRL 0x06c1
+#define regUVD_JADP_MCIF_URGENT_CTRL_BASE_IDX 1
+#define regUVD_JMI_URGENT_CTRL 0x06c2
+#define regUVD_JMI_URGENT_CTRL_BASE_IDX 1
+#define regUVD_JMI_CTRL 0x06c3
+#define regUVD_JMI_CTRL_BASE_IDX 1
+#define regJPEG_MEMCHECK_CLAMPING_CNTL 0x06c4
+#define regJPEG_MEMCHECK_CLAMPING_CNTL_BASE_IDX 1
+#define regJPEG_MEMCHECK_SAFE_ADDR 0x06c5
+#define regJPEG_MEMCHECK_SAFE_ADDR_BASE_IDX 1
+#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT 0x06c6
+#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT_BASE_IDX 1
+#define regUVD_JMI_LAT_CTRL 0x06c7
+#define regUVD_JMI_LAT_CTRL_BASE_IDX 1
+#define regUVD_JMI_LAT_CNTR 0x06c8
+#define regUVD_JMI_LAT_CNTR_BASE_IDX 1
+#define regUVD_JMI_AVG_LAT_CNTR 0x06c9
+#define regUVD_JMI_AVG_LAT_CNTR_BASE_IDX 1
+#define regUVD_JMI_PERFMON_CTRL 0x06ca
+#define regUVD_JMI_PERFMON_CTRL_BASE_IDX 1
+#define regUVD_JMI_PERFMON_COUNT_LO 0x06cb
+#define regUVD_JMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define regUVD_JMI_PERFMON_COUNT_HI 0x06cc
+#define regUVD_JMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define regUVD_JMI_CLEAN_STATUS 0x06cd
+#define regUVD_JMI_CLEAN_STATUS_BASE_IDX 1
+#define regUVD_JMI_CNTL 0x06ce
+#define regUVD_JMI_CNTL_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_common_dec
+// base address: 0x21400
+#define regJPEG_SOFT_RESET_STATUS 0x0700
+#define regJPEG_SOFT_RESET_STATUS_BASE_IDX 1
+#define regJPEG_SYS_INT_EN 0x0701
+#define regJPEG_SYS_INT_EN_BASE_IDX 1
+#define regJPEG_SYS_INT_EN1 0x0702
+#define regJPEG_SYS_INT_EN1_BASE_IDX 1
+#define regJPEG_SYS_INT_STATUS 0x0703
+#define regJPEG_SYS_INT_STATUS_BASE_IDX 1
+#define regJPEG_SYS_INT_STATUS1 0x0704
+#define regJPEG_SYS_INT_STATUS1_BASE_IDX 1
+#define regJPEG_SYS_INT_ACK 0x0705
+#define regJPEG_SYS_INT_ACK_BASE_IDX 1
+#define regJPEG_SYS_INT_ACK1 0x0706
+#define regJPEG_SYS_INT_ACK1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_EN 0x0707
+#define regJPEG_MEMCHECK_SYS_INT_EN_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_EN1 0x0708
+#define regJPEG_MEMCHECK_SYS_INT_EN1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT 0x0709
+#define regJPEG_MEMCHECK_SYS_INT_STAT_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT1 0x070a
+#define regJPEG_MEMCHECK_SYS_INT_STAT1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT2 0x070b
+#define regJPEG_MEMCHECK_SYS_INT_STAT2_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK 0x070c
+#define regJPEG_MEMCHECK_SYS_INT_ACK_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK1 0x070d
+#define regJPEG_MEMCHECK_SYS_INT_ACK1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK2 0x070e
+#define regJPEG_MEMCHECK_SYS_INT_ACK2_BASE_IDX 1
+#define regJPEG_MASTINT_EN 0x0710
+#define regJPEG_MASTINT_EN_BASE_IDX 1
+#define regJPEG_IH_CTRL 0x0711
+#define regJPEG_IH_CTRL_BASE_IDX 1
+#define regJRBBM_ARB_CTRL 0x0713
+#define regJRBBM_ARB_CTRL_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_common_sclk_dec
+// base address: 0x21480
+#define regJPEG_CGC_GATE 0x0720
+#define regJPEG_CGC_GATE_BASE_IDX 1
+#define regJPEG_CGC_CTRL 0x0721
+#define regJPEG_CGC_CTRL_BASE_IDX 1
+#define regJPEG_CGC_STATUS 0x0722
+#define regJPEG_CGC_STATUS_BASE_IDX 1
+#define regJPEG_COMN_CGC_MEM_CTRL 0x0723
+#define regJPEG_COMN_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_DEC_CGC_MEM_CTRL 0x0724
+#define regJPEG_DEC_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_ENC_CGC_MEM_CTRL 0x0726
+#define regJPEG_ENC_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_PERF_BANK_CONF 0x0727
+#define regJPEG_PERF_BANK_CONF_BASE_IDX 1
+#define regJPEG_PERF_BANK_EVENT_SEL 0x0728
+#define regJPEG_PERF_BANK_EVENT_SEL_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT0 0x0729
+#define regJPEG_PERF_BANK_COUNT0_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT1 0x072a
+#define regJPEG_PERF_BANK_COUNT1_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT2 0x072b
+#define regJPEG_PERF_BANK_COUNT2_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT3 0x072c
+#define regJPEG_PERF_BANK_COUNT3_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_pg_dec
+// base address: 0x1f800
+#define regUVD_PGFSM_CONFIG 0x0000
+#define regUVD_PGFSM_CONFIG_BASE_IDX 1
+#define regUVD_PGFSM_STATUS 0x0001
+#define regUVD_PGFSM_STATUS_BASE_IDX 1
+#define regUVD_POWER_STATUS 0x0002
+#define regUVD_POWER_STATUS_BASE_IDX 1
+#define regUVD_JPEG_POWER_STATUS 0x0003
+#define regUVD_JPEG_POWER_STATUS_BASE_IDX 1
+#define regUVD_MC_DJPEG_RD_SPACE 0x0006
+#define regUVD_MC_DJPEG_RD_SPACE_BASE_IDX 1
+#define regUVD_MC_DJPEG_WR_SPACE 0x0007
+#define regUVD_MC_DJPEG_WR_SPACE_BASE_IDX 1
+#define regUVD_MC_EJPEG_RD_SPACE 0x0008
+#define regUVD_MC_EJPEG_RD_SPACE_BASE_IDX 1
+#define regUVD_MC_EJPEG_WR_SPACE 0x0009
+#define regUVD_MC_EJPEG_WR_SPACE_BASE_IDX 1
+#define regUVD_PG_IND_INDEX 0x000c
+#define regUVD_PG_IND_INDEX_BASE_IDX 1
+#define regUVD_PG_IND_DATA 0x000e
+#define regUVD_PG_IND_DATA_BASE_IDX 1
+#define regCC_UVD_HARVESTING 0x000f
+#define regCC_UVD_HARVESTING_BASE_IDX 1
+#define regUVD_DPG_LMA_CTL 0x0011
+#define regUVD_DPG_LMA_CTL_BASE_IDX 1
+#define regUVD_DPG_LMA_DATA 0x0012
+#define regUVD_DPG_LMA_DATA_BASE_IDX 1
+#define regUVD_DPG_LMA_MASK 0x0013
+#define regUVD_DPG_LMA_MASK_BASE_IDX 1
+#define regUVD_DPG_PAUSE 0x0014
+#define regUVD_DPG_PAUSE_BASE_IDX 1
+#define regUVD_SCRATCH1 0x0015
+#define regUVD_SCRATCH1_BASE_IDX 1
+#define regUVD_SCRATCH2 0x0016
+#define regUVD_SCRATCH2_BASE_IDX 1
+#define regUVD_SCRATCH3 0x0017
+#define regUVD_SCRATCH3_BASE_IDX 1
+#define regUVD_SCRATCH4 0x0018
+#define regUVD_SCRATCH4_BASE_IDX 1
+#define regUVD_SCRATCH5 0x0019
+#define regUVD_SCRATCH5_BASE_IDX 1
+#define regUVD_SCRATCH6 0x001a
+#define regUVD_SCRATCH6_BASE_IDX 1
+#define regUVD_SCRATCH7 0x001b
+#define regUVD_SCRATCH7_BASE_IDX 1
+#define regUVD_SCRATCH8 0x001c
+#define regUVD_SCRATCH8_BASE_IDX 1
+#define regUVD_SCRATCH9 0x001d
+#define regUVD_SCRATCH9_BASE_IDX 1
+#define regUVD_SCRATCH10 0x001e
+#define regUVD_SCRATCH10_BASE_IDX 1
+#define regUVD_SCRATCH11 0x001f
+#define regUVD_SCRATCH11_BASE_IDX 1
+#define regUVD_SCRATCH12 0x0020
+#define regUVD_SCRATCH12_BASE_IDX 1
+#define regUVD_SCRATCH13 0x0021
+#define regUVD_SCRATCH13_BASE_IDX 1
+#define regUVD_SCRATCH14 0x0022
+#define regUVD_SCRATCH14_BASE_IDX 1
+#define regUVD_FREE_COUNTER_REG 0x0023
+#define regUVD_FREE_COUNTER_REG_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0024
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0025
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_DPG_VCPU_CACHE_OFFSET0 0x0026
+#define regUVD_DPG_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_VMID 0x0027
+#define regUVD_DPG_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define regUVD_REG_FILTER_EN 0x0028
+#define regUVD_REG_FILTER_EN_BASE_IDX 1
+#define regUVD_SECURITY_REG_VIO_REPORT 0x0029
+#define regUVD_SECURITY_REG_VIO_REPORT_BASE_IDX 1
+#define regUVD_FW_VERSION 0x002a
+#define regUVD_FW_VERSION_BASE_IDX 1
+#define regUVD_PF_STATUS 0x002c
+#define regUVD_PF_STATUS_BASE_IDX 1
+#define regUVD_DPG_CLK_EN_VCPU_REPORT 0x002e
+#define regUVD_DPG_CLK_EN_VCPU_REPORT_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO 0x002f
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI 0x0030
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO 0x0031
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI 0x0032
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR 0x0033
+#define regCC_UVD_VCPU_ERR_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_INST_ADDR_LO 0x0034
+#define regCC_UVD_VCPU_ERR_INST_ADDR_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_INST_ADDR_HI 0x0035
+#define regCC_UVD_VCPU_ERR_INST_ADDR_HI_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC_SPACE 0x003d
+#define regUVD_LMI_MMSCH_NC_SPACE_BASE_IDX 1
+#define regUVD_LMI_ATOMIC_SPACE 0x003e
+#define regUVD_LMI_ATOMIC_SPACE_BASE_IDX 1
+#define regUVD_GFX8_ADDR_CONFIG 0x0041
+#define regUVD_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define regUVD_GFX10_ADDR_CONFIG 0x0042
+#define regUVD_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define regUVD_GPCNT2_CNTL 0x0043
+#define regUVD_GPCNT2_CNTL_BASE_IDX 1
+#define regUVD_GPCNT2_TARGET_LOWER 0x0044
+#define regUVD_GPCNT2_TARGET_LOWER_BASE_IDX 1
+#define regUVD_GPCNT2_STATUS_LOWER 0x0045
+#define regUVD_GPCNT2_STATUS_LOWER_BASE_IDX 1
+#define regUVD_GPCNT2_TARGET_UPPER 0x0046
+#define regUVD_GPCNT2_TARGET_UPPER_BASE_IDX 1
+#define regUVD_GPCNT2_STATUS_UPPER 0x0047
+#define regUVD_GPCNT2_STATUS_UPPER_BASE_IDX 1
+#define regUVD_GPCNT3_CNTL 0x0048
+#define regUVD_GPCNT3_CNTL_BASE_IDX 1
+#define regUVD_GPCNT3_TARGET_LOWER 0x0049
+#define regUVD_GPCNT3_TARGET_LOWER_BASE_IDX 1
+#define regUVD_GPCNT3_STATUS_LOWER 0x004a
+#define regUVD_GPCNT3_STATUS_LOWER_BASE_IDX 1
+#define regUVD_GPCNT3_TARGET_UPPER 0x004b
+#define regUVD_GPCNT3_TARGET_UPPER_BASE_IDX 1
+#define regUVD_GPCNT3_STATUS_UPPER 0x004c
+#define regUVD_GPCNT3_STATUS_UPPER_BASE_IDX 1
+#define regUVD_VCLK_DS_CNTL 0x004d
+#define regUVD_VCLK_DS_CNTL_BASE_IDX 1
+#define regUVD_DCLK_DS_CNTL 0x004e
+#define regUVD_DCLK_DS_CNTL_BASE_IDX 1
+#define regUVD_TSC_LOWER 0x004f
+#define regUVD_TSC_LOWER_BASE_IDX 1
+#define regUVD_TSC_UPPER 0x0050
+#define regUVD_TSC_UPPER_BASE_IDX 1
+#define regVCN_FEATURES 0x0051
+#define regVCN_FEATURES_BASE_IDX 1
+#define regUVD_GPUIOV_STATUS 0x0055
+#define regUVD_GPUIOV_STATUS_BASE_IDX 1
+#define regUVD_RAS_VCPU_VCODEC_STATUS 0x0057
+#define regUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1
+#define regUVD_RAS_MMSCH_FATAL_ERROR 0x0058
+#define regUVD_RAS_MMSCH_FATAL_ERROR_BASE_IDX 1
+#define regUVD_RAS_JPEG0_STATUS 0x0059
+#define regUVD_RAS_JPEG0_STATUS_BASE_IDX 1
+#define regUVD_RAS_JPEG1_STATUS 0x005a
+#define regUVD_RAS_JPEG1_STATUS_BASE_IDX 1
+#define regUVD_RAS_CNTL_PMI_ARB 0x005b
+#define regUVD_RAS_CNTL_PMI_ARB_BASE_IDX 1
+#define regUVD_SCRATCH15 0x005c
+#define regUVD_SCRATCH15_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL1 0x005d
+#define regVCN_JPEG_DB_CTRL1_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL2 0x005e
+#define regVCN_JPEG_DB_CTRL2_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL3 0x005f
+#define regVCN_JPEG_DB_CTRL3_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL4 0x0060
+#define regVCN_JPEG_DB_CTRL4_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL5 0x0061
+#define regVCN_JPEG_DB_CTRL5_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL6 0x0062
+#define regVCN_JPEG_DB_CTRL6_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL7 0x0063
+#define regVCN_JPEG_DB_CTRL7_BASE_IDX 1
+#define regUVD_SCRATCH32 0x006d
+#define regUVD_SCRATCH32_BASE_IDX 1
+#define regUVD_VERSION 0x006e
+#define regUVD_VERSION_BASE_IDX 1
+#define regVCN_RB_DB_CTRL 0x0070
+#define regVCN_RB_DB_CTRL_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL 0x0071
+#define regVCN_JPEG_DB_CTRL_BASE_IDX 1
+#define regVCN_RB1_DB_CTRL 0x0072
+#define regVCN_RB1_DB_CTRL_BASE_IDX 1
+#define regVCN_RB2_DB_CTRL 0x0073
+#define regVCN_RB2_DB_CTRL_BASE_IDX 1
+#define regVCN_RB3_DB_CTRL 0x0074
+#define regVCN_RB3_DB_CTRL_BASE_IDX 1
+#define regVCN_RB4_DB_CTRL 0x0075
+#define regVCN_RB4_DB_CTRL_BASE_IDX 1
+#define regVCN_RB_ENABLE 0x0085
+#define regVCN_RB_ENABLE_BASE_IDX 1
+#define regVCN_RB_WPTR_CTRL 0x0086
+#define regVCN_RB_WPTR_CTRL_BASE_IDX 1
+#define regUVD_RB_RPTR 0x00ac
+#define regUVD_RB_RPTR_BASE_IDX 1
+#define regUVD_RB_WPTR 0x00ad
+#define regUVD_RB_WPTR_BASE_IDX 1
+#define regUVD_RB_RPTR2 0x00ae
+#define regUVD_RB_RPTR2_BASE_IDX 1
+#define regUVD_RB_WPTR2 0x00af
+#define regUVD_RB_WPTR2_BASE_IDX 1
+#define regUVD_RB_RPTR3 0x00b0
+#define regUVD_RB_RPTR3_BASE_IDX 1
+#define regUVD_RB_WPTR3 0x00b1
+#define regUVD_RB_WPTR3_BASE_IDX 1
+#define regUVD_RB_RPTR4 0x00b2
+#define regUVD_RB_RPTR4_BASE_IDX 1
+#define regUVD_RB_WPTR4 0x00b3
+#define regUVD_RB_WPTR4_BASE_IDX 1
+#define regUVD_OUT_RB_RPTR 0x00b4
+#define regUVD_OUT_RB_RPTR_BASE_IDX 1
+#define regUVD_OUT_RB_WPTR 0x00b5
+#define regUVD_OUT_RB_WPTR_BASE_IDX 1
+#define regUVD_AUDIO_RB_RPTR 0x00b6
+#define regUVD_AUDIO_RB_RPTR_BASE_IDX 1
+#define regUVD_AUDIO_RB_WPTR 0x00b7
+#define regUVD_AUDIO_RB_WPTR_BASE_IDX 1
+#define regUVD_RBC_RB_RPTR 0x00b8
+#define regUVD_RBC_RB_RPTR_BASE_IDX 1
+#define regUVD_RBC_RB_WPTR 0x00b9
+#define regUVD_RBC_RB_WPTR_BASE_IDX 1
+#define regUVD_DPG_LMA_CTL2 0x00bb
+#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_mmsch_dec
+// base address: 0x20d00
+#define regMMSCH_UCODE_ADDR 0x0540
+#define regMMSCH_UCODE_ADDR_BASE_IDX 1
+#define regMMSCH_UCODE_DATA 0x0541
+#define regMMSCH_UCODE_DATA_BASE_IDX 1
+#define regMMSCH_SRAM_ADDR 0x0542
+#define regMMSCH_SRAM_ADDR_BASE_IDX 1
+#define regMMSCH_SRAM_DATA 0x0543
+#define regMMSCH_SRAM_DATA_BASE_IDX 1
+#define regMMSCH_VF_SRAM_OFFSET 0x0544
+#define regMMSCH_VF_SRAM_OFFSET_BASE_IDX 1
+#define regMMSCH_DB_SRAM_OFFSET 0x0545
+#define regMMSCH_DB_SRAM_OFFSET_BASE_IDX 1
+#define regMMSCH_CTX_SRAM_OFFSET 0x0546
+#define regMMSCH_CTX_SRAM_OFFSET_BASE_IDX 1
+#define regMMSCH_CTL 0x0547
+#define regMMSCH_CTL_BASE_IDX 1
+#define regMMSCH_INTR 0x0548
+#define regMMSCH_INTR_BASE_IDX 1
+#define regMMSCH_INTR_ACK 0x0549
+#define regMMSCH_INTR_ACK_BASE_IDX 1
+#define regMMSCH_INTR_STATUS 0x054a
+#define regMMSCH_INTR_STATUS_BASE_IDX 1
+#define regMMSCH_VF_VMID 0x054b
+#define regMMSCH_VF_VMID_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_LO 0x054c
+#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_HI 0x054d
+#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_CTX_SIZE 0x054e
+#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1
+#define regMMSCH_VF_GPCOM_ADDR_LO 0x054f
+#define regMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_GPCOM_ADDR_HI 0x0550
+#define regMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_GPCOM_SIZE 0x0551
+#define regMMSCH_VF_GPCOM_SIZE_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_HOST 0x0552
+#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_RESP 0x0553
+#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_0 0x0554
+#define regMMSCH_VF_MAILBOX_0_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_0_RESP 0x0555
+#define regMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_1 0x0556
+#define regMMSCH_VF_MAILBOX_1_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_1_RESP 0x0557
+#define regMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 1
+#define regMMSCH_CNTL 0x055c
+#define regMMSCH_CNTL_BASE_IDX 1
+#define regMMSCH_NONCACHE_OFFSET0 0x055d
+#define regMMSCH_NONCACHE_OFFSET0_BASE_IDX 1
+#define regMMSCH_NONCACHE_SIZE0 0x055e
+#define regMMSCH_NONCACHE_SIZE0_BASE_IDX 1
+#define regMMSCH_NONCACHE_OFFSET1 0x055f
+#define regMMSCH_NONCACHE_OFFSET1_BASE_IDX 1
+#define regMMSCH_NONCACHE_SIZE1 0x0560
+#define regMMSCH_NONCACHE_SIZE1_BASE_IDX 1
+#define regMMSCH_PROC_STATE1 0x0566
+#define regMMSCH_PROC_STATE1_BASE_IDX 1
+#define regMMSCH_LAST_MC_ADDR 0x0567
+#define regMMSCH_LAST_MC_ADDR_BASE_IDX 1
+#define regMMSCH_LAST_MEM_ACCESS_HI 0x0568
+#define regMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 1
+#define regMMSCH_LAST_MEM_ACCESS_LO 0x0569
+#define regMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 1
+#define regMMSCH_IOV_ACTIVE_FCN_ID 0x056a
+#define regMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regMMSCH_SCRATCH_0 0x056b
+#define regMMSCH_SCRATCH_0_BASE_IDX 1
+#define regMMSCH_SCRATCH_1 0x056c
+#define regMMSCH_SCRATCH_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_0 0x056d
+#define regMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_CONTROL_0 0x056e
+#define regMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_0 0x056f
+#define regMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0570
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0571
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0572
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW6_0 0x0573
+#define regMMSCH_GPUIOV_DW6_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW7_0 0x0574
+#define regMMSCH_GPUIOV_DW7_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW8_0 0x0575
+#define regMMSCH_GPUIOV_DW8_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_1 0x0576
+#define regMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_CONTROL_1 0x0577
+#define regMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_1 0x0578
+#define regMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0579
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_1 0x057a
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x057b
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW6_1 0x057c
+#define regMMSCH_GPUIOV_DW6_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW7_1 0x057d
+#define regMMSCH_GPUIOV_DW7_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW8_1 0x057e
+#define regMMSCH_GPUIOV_DW8_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CNTXT 0x057f
+#define regMMSCH_GPUIOV_CNTXT_BASE_IDX 1
+#define regMMSCH_SCRATCH_2 0x0580
+#define regMMSCH_SCRATCH_2_BASE_IDX 1
+#define regMMSCH_SCRATCH_3 0x0581
+#define regMMSCH_SCRATCH_3_BASE_IDX 1
+#define regMMSCH_SCRATCH_4 0x0582
+#define regMMSCH_SCRATCH_4_BASE_IDX 1
+#define regMMSCH_SCRATCH_5 0x0583
+#define regMMSCH_SCRATCH_5_BASE_IDX 1
+#define regMMSCH_SCRATCH_6 0x0584
+#define regMMSCH_SCRATCH_6_BASE_IDX 1
+#define regMMSCH_SCRATCH_7 0x0585
+#define regMMSCH_SCRATCH_7_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_HEAD_0 0x0586
+#define regMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_TAIL_0 0x0587
+#define regMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_HEAD_1 0x0588
+#define regMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_TAIL_1 0x0589
+#define regMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 1
+#define regMMSCH_NACK_STATUS 0x058a
+#define regMMSCH_NACK_STATUS_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX0_DATA 0x058b
+#define regMMSCH_VF_MAILBOX0_DATA_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX1_DATA 0x058c
+#define regMMSCH_VF_MAILBOX1_DATA_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x058d
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_0 0x058e
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x058f
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0590
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0591
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0592
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CNTXT_IP 0x0593
+#define regMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_2 0x0594
+#define regMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_CONTROL_2 0x0595
+#define regMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_2 0x0596
+#define regMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0597
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0598
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0599
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW6_2 0x059a
+#define regMMSCH_GPUIOV_DW6_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW7_2 0x059b
+#define regMMSCH_GPUIOV_DW7_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW8_2 0x059c
+#define regMMSCH_GPUIOV_DW8_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x059d
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_2 0x059e
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x059f
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_HEAD_2 0x05a0
+#define regMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_TAIL_2 0x05a1
+#define regMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 1
+#define regMMSCH_VM_BUSY_STATUS_0 0x05a2
+#define regMMSCH_VM_BUSY_STATUS_0_BASE_IDX 1
+#define regMMSCH_VM_BUSY_STATUS_1 0x05a3
+#define regMMSCH_VM_BUSY_STATUS_1_BASE_IDX 1
+#define regMMSCH_VM_BUSY_STATUS_2 0x05a4
+#define regMMSCH_VM_BUSY_STATUS_2_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_slmi_adpdec
+// base address: 0x21c00
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW 0x0900
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH 0x0901
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW 0x0902
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH 0x0903
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW 0x0904
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH 0x0905
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW 0x0906
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH 0x0907
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW 0x0908
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH 0x0909
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW 0x090a
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH 0x090b
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW 0x090c
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH 0x090d
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW 0x090e
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH 0x090f
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC_VMID 0x0910
+#define regUVD_LMI_MMSCH_NC_VMID_BASE_IDX 1
+#define regUVD_LMI_MMSCH_CTRL 0x0911
+#define regUVD_LMI_MMSCH_CTRL_BASE_IDX 1
+#define regUVD_MMSCH_LMI_STATUS 0x0912
+#define regUVD_MMSCH_LMI_STATUS_BASE_IDX 1
+#define regVCN_RAS_CNTL_MMSCH 0x0914
+#define regVCN_RAS_CNTL_MMSCH_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jrbc1_uvd_jrbc_dec
+// base address: 0x1e000
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR 0x0000
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_CNTL 0x0001
+#define regUVD_JRBC1_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_SIZE 0x0002
+#define regUVD_JRBC1_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_URGENT_CNTL 0x0003
+#define regUVD_JRBC1_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_REF_DATA 0x0004
+#define regUVD_JRBC1_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER 0x0005
+#define regUVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_SOFT_RESET 0x0008
+#define regUVD_JRBC1_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_STATUS 0x0009
+#define regUVD_JRBC1_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR 0x000a
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_BUF_STATUS 0x000b
+#define regUVD_JRBC1_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_BUF_STATUS 0x000c
+#define regUVD_JRBC1_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE 0x000d
+#define regUVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER 0x000e
+#define regUVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_REF_DATA 0x000f
+#define regUVD_JRBC1_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_CMD 0x0010
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0011
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0012
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_SIZE 0x0013
+#define regUVD_JRBC1_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_SCRATCH0 0x0014
+#define regUVD_JRBC1_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc2_uvd_jrbc_dec
+// base address: 0x1e100
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR 0x0040
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_CNTL 0x0041
+#define regUVD_JRBC2_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_SIZE 0x0042
+#define regUVD_JRBC2_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_URGENT_CNTL 0x0043
+#define regUVD_JRBC2_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_REF_DATA 0x0044
+#define regUVD_JRBC2_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER 0x0045
+#define regUVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_SOFT_RESET 0x0048
+#define regUVD_JRBC2_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_STATUS 0x0049
+#define regUVD_JRBC2_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR 0x004a
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_BUF_STATUS 0x004b
+#define regUVD_JRBC2_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_BUF_STATUS 0x004c
+#define regUVD_JRBC2_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE 0x004d
+#define regUVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER 0x004e
+#define regUVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_REF_DATA 0x004f
+#define regUVD_JRBC2_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_CMD 0x0050
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0051
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0052
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_SIZE 0x0053
+#define regUVD_JRBC2_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_SCRATCH0 0x0054
+#define regUVD_JRBC2_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc3_uvd_jrbc_dec
+// base address: 0x1e200
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR 0x0080
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_CNTL 0x0081
+#define regUVD_JRBC3_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_SIZE 0x0082
+#define regUVD_JRBC3_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_URGENT_CNTL 0x0083
+#define regUVD_JRBC3_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_REF_DATA 0x0084
+#define regUVD_JRBC3_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER 0x0085
+#define regUVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_SOFT_RESET 0x0088
+#define regUVD_JRBC3_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_STATUS 0x0089
+#define regUVD_JRBC3_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR 0x008a
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_BUF_STATUS 0x008b
+#define regUVD_JRBC3_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_BUF_STATUS 0x008c
+#define regUVD_JRBC3_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE 0x008d
+#define regUVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER 0x008e
+#define regUVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_REF_DATA 0x008f
+#define regUVD_JRBC3_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_CMD 0x0090
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0091
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0092
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_SIZE 0x0093
+#define regUVD_JRBC3_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_SCRATCH0 0x0094
+#define regUVD_JRBC3_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc4_uvd_jrbc_dec
+// base address: 0x1e300
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR 0x00c0
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_CNTL 0x00c1
+#define regUVD_JRBC4_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_SIZE 0x00c2
+#define regUVD_JRBC4_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_URGENT_CNTL 0x00c3
+#define regUVD_JRBC4_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_REF_DATA 0x00c4
+#define regUVD_JRBC4_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER 0x00c5
+#define regUVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_SOFT_RESET 0x00c8
+#define regUVD_JRBC4_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_STATUS 0x00c9
+#define regUVD_JRBC4_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR 0x00ca
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_BUF_STATUS 0x00cb
+#define regUVD_JRBC4_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_BUF_STATUS 0x00cc
+#define regUVD_JRBC4_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE 0x00cd
+#define regUVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER 0x00ce
+#define regUVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_REF_DATA 0x00cf
+#define regUVD_JRBC4_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_CMD 0x00d0
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0 0x00d1
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1 0x00d2
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0 +#define regUVD_JRBC4_UVD_JRBC_RB_SIZE 0x00d3 +#define regUVD_JRBC4_UVD_JRBC_RB_SIZE_BASE_IDX 0 +#define regUVD_JRBC4_UVD_JRBC_SCRATCH0 0x00d4 +#define regUVD_JRBC4_UVD_JRBC_SCRATCH0_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jrbc5_uvd_jrbc_dec +// base address: 0x1e400 +#define regUVD_JRBC5_UVD_JRBC_RB_WPTR 0x0100 +#define regUVD_JRBC5_UVD_JRBC_RB_WPTR_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_RB_CNTL 0x0101 +#define regUVD_JRBC5_UVD_JRBC_RB_CNTL_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_IB_SIZE 0x0102 +#define regUVD_JRBC5_UVD_JRBC_IB_SIZE_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_URGENT_CNTL 0x0103 +#define regUVD_JRBC5_UVD_JRBC_URGENT_CNTL_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_RB_REF_DATA 0x0104 +#define regUVD_JRBC5_UVD_JRBC_RB_REF_DATA_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER 0x0105 +#define regUVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_SOFT_RESET 0x0108 +#define regUVD_JRBC5_UVD_JRBC_SOFT_RESET_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_STATUS 0x0109 +#define regUVD_JRBC5_UVD_JRBC_STATUS_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_RB_RPTR 0x010a +#define regUVD_JRBC5_UVD_JRBC_RB_RPTR_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_RB_BUF_STATUS 0x010b +#define regUVD_JRBC5_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_IB_BUF_STATUS 0x010c +#define regUVD_JRBC5_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE 0x010d +#define regUVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER 0x010e +#define regUVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_IB_REF_DATA 0x010f +#define regUVD_JRBC5_UVD_JRBC_IB_REF_DATA_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JPEG_PREEMPT_CMD 0x0110 +#define regUVD_JRBC5_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0111 +#define regUVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0112 +#define regUVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_RB_SIZE 0x0113 +#define regUVD_JRBC5_UVD_JRBC_RB_SIZE_BASE_IDX 0 +#define regUVD_JRBC5_UVD_JRBC_SCRATCH0 0x0114 +#define regUVD_JRBC5_UVD_JRBC_SCRATCH0_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jrbc6_uvd_jrbc_dec +// base address: 0x1e500 +#define regUVD_JRBC6_UVD_JRBC_RB_WPTR 0x0140 +#define regUVD_JRBC6_UVD_JRBC_RB_WPTR_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_RB_CNTL 0x0141 +#define regUVD_JRBC6_UVD_JRBC_RB_CNTL_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_IB_SIZE 0x0142 +#define regUVD_JRBC6_UVD_JRBC_IB_SIZE_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_URGENT_CNTL 0x0143 +#define regUVD_JRBC6_UVD_JRBC_URGENT_CNTL_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_RB_REF_DATA 0x0144 +#define regUVD_JRBC6_UVD_JRBC_RB_REF_DATA_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER 0x0145 +#define regUVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_SOFT_RESET 0x0148 +#define regUVD_JRBC6_UVD_JRBC_SOFT_RESET_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_STATUS 0x0149 +#define regUVD_JRBC6_UVD_JRBC_STATUS_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_RB_RPTR 0x014a +#define regUVD_JRBC6_UVD_JRBC_RB_RPTR_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_RB_BUF_STATUS 0x014b +#define regUVD_JRBC6_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_IB_BUF_STATUS 0x014c +#define regUVD_JRBC6_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0 +#define 
regUVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE 0x014d +#define regUVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER 0x014e +#define regUVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_IB_REF_DATA 0x014f +#define regUVD_JRBC6_UVD_JRBC_IB_REF_DATA_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JPEG_PREEMPT_CMD 0x0150 +#define regUVD_JRBC6_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0151 +#define regUVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0152 +#define regUVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_RB_SIZE 0x0153 +#define regUVD_JRBC6_UVD_JRBC_RB_SIZE_BASE_IDX 0 +#define regUVD_JRBC6_UVD_JRBC_SCRATCH0 0x0154 +#define regUVD_JRBC6_UVD_JRBC_SCRATCH0_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jrbc7_uvd_jrbc_dec +// base address: 0x1e600 +#define regUVD_JRBC7_UVD_JRBC_RB_WPTR 0x0180 +#define regUVD_JRBC7_UVD_JRBC_RB_WPTR_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_RB_CNTL 0x0181 +#define regUVD_JRBC7_UVD_JRBC_RB_CNTL_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_IB_SIZE 0x0182 +#define regUVD_JRBC7_UVD_JRBC_IB_SIZE_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_URGENT_CNTL 0x0183 +#define regUVD_JRBC7_UVD_JRBC_URGENT_CNTL_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_RB_REF_DATA 0x0184 +#define regUVD_JRBC7_UVD_JRBC_RB_REF_DATA_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER 0x0185 +#define regUVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_SOFT_RESET 0x0188 +#define regUVD_JRBC7_UVD_JRBC_SOFT_RESET_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_STATUS 0x0189 +#define regUVD_JRBC7_UVD_JRBC_STATUS_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_RB_RPTR 0x018a +#define regUVD_JRBC7_UVD_JRBC_RB_RPTR_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_RB_BUF_STATUS 0x018b +#define regUVD_JRBC7_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_IB_BUF_STATUS 0x018c +#define regUVD_JRBC7_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE 0x018d +#define regUVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER 0x018e +#define regUVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_IB_REF_DATA 0x018f +#define regUVD_JRBC7_UVD_JRBC_IB_REF_DATA_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JPEG_PREEMPT_CMD 0x0190 +#define regUVD_JRBC7_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0191 +#define regUVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0192 +#define regUVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_RB_SIZE 0x0193 +#define regUVD_JRBC7_UVD_JRBC_RB_SIZE_BASE_IDX 0 +#define regUVD_JRBC7_UVD_JRBC_SCRATCH0 0x0194 +#define regUVD_JRBC7_UVD_JRBC_SCRATCH0_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jmi1_uvd_jmi_dec +// base address: 0x1e080 +#define regUVD_JMI1_UVD_JPEG_DEC_PF_CTRL 0x0020 +#define regUVD_JMI1_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_CTRL 0x0021 +#define regUVD_JMI1_UVD_LMI_JRBC_CTRL_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_CTRL 0x0022 +#define regUVD_JMI1_UVD_LMI_JPEG_CTRL_BASE_IDX 0 +#define regUVD_JMI1_JPEG_LMI_DROP 0x0023 +#define regUVD_JMI1_JPEG_LMI_DROP_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_VMID 0x0024 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_RB_VMID 0x0025 +#define 
regUVD_JMI1_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_VMID 0x0026 +#define regUVD_JMI1_UVD_LMI_JPEG_VMID_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0027 +#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0028 +#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0029 +#define regUVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x002a +#define regUVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x002b +#define regUVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x002c +#define regUVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID 0x002d +#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define regUVD_JMI1_UVD_JMI_DEC_SWAP_CNTL 0x002e +#define regUVD_JMI1_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0 +#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL 0x002f +#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0 +#define regUVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0030 +#define regUVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0031 +#define regUVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0032 +#define regUVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0033 +#define regUVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0034 +#define regUVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0035 +#define regUVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0036 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0037 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0038 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0039 +#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL2 0x003d +#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jmi2_uvd_jmi_dec +// base address: 0x1e180 +#define regUVD_JMI2_UVD_JPEG_DEC_PF_CTRL 0x0060 +#define regUVD_JMI2_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_CTRL 0x0061 +#define regUVD_JMI2_UVD_LMI_JRBC_CTRL_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_CTRL 0x0062 +#define regUVD_JMI2_UVD_LMI_JPEG_CTRL_BASE_IDX 0 +#define regUVD_JMI2_JPEG_LMI_DROP 0x0063 +#define regUVD_JMI2_JPEG_LMI_DROP_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_VMID 0x0064 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_RB_VMID 0x0065 +#define regUVD_JMI2_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_VMID 0x0066 +#define regUVD_JMI2_UVD_LMI_JPEG_VMID_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0067 +#define 
regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0068 +#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0069 +#define regUVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x006a +#define regUVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x006b +#define regUVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x006c +#define regUVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID 0x006d +#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define regUVD_JMI2_UVD_JMI_DEC_SWAP_CNTL 0x006e +#define regUVD_JMI2_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0 +#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL 0x006f +#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0 +#define regUVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0070 +#define regUVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0071 +#define regUVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0072 +#define regUVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0073 +#define regUVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0074 +#define regUVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0075 +#define regUVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0076 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0077 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0078 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0079 +#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL2 0x007d +#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jmi3_uvd_jmi_dec +// base address: 0x1e280 +#define regUVD_JMI3_UVD_JPEG_DEC_PF_CTRL 0x00a0 +#define regUVD_JMI3_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_CTRL 0x00a1 +#define regUVD_JMI3_UVD_LMI_JRBC_CTRL_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_CTRL 0x00a2 +#define regUVD_JMI3_UVD_LMI_JPEG_CTRL_BASE_IDX 0 +#define regUVD_JMI3_JPEG_LMI_DROP 0x00a3 +#define regUVD_JMI3_JPEG_LMI_DROP_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_VMID 0x00a4 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_RB_VMID 0x00a5 +#define regUVD_JMI3_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_VMID 0x00a6 +#define regUVD_JMI3_UVD_LMI_JPEG_VMID_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x00a7 +#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x00a8 +#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x00a9 +#define 
regUVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x00aa +#define regUVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x00ab +#define regUVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x00ac +#define regUVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID 0x00ad +#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define regUVD_JMI3_UVD_JMI_DEC_SWAP_CNTL 0x00ae +#define regUVD_JMI3_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0 +#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL 0x00af +#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0 +#define regUVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x00b0 +#define regUVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x00b1 +#define regUVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x00b2 +#define regUVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x00b3 +#define regUVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x00b4 +#define regUVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x00b5 +#define regUVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x00b6 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x00b7 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x00b8 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x00b9 +#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL2 0x00bd +#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jmi4_uvd_jmi_dec +// base address: 0x1e380 +#define regUVD_JMI4_UVD_JPEG_DEC_PF_CTRL 0x00e0 +#define regUVD_JMI4_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_CTRL 0x00e1 +#define regUVD_JMI4_UVD_LMI_JRBC_CTRL_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_CTRL 0x00e2 +#define regUVD_JMI4_UVD_LMI_JPEG_CTRL_BASE_IDX 0 +#define regUVD_JMI4_JPEG_LMI_DROP 0x00e3 +#define regUVD_JMI4_JPEG_LMI_DROP_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_VMID 0x00e4 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_RB_VMID 0x00e5 +#define regUVD_JMI4_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_VMID 0x00e6 +#define regUVD_JMI4_UVD_LMI_JPEG_VMID_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x00e7 +#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x00e8 +#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x00e9 +#define regUVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x00ea +#define regUVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x00eb +#define 
regUVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x00ec +#define regUVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID 0x00ed +#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define regUVD_JMI4_UVD_JMI_DEC_SWAP_CNTL 0x00ee +#define regUVD_JMI4_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0 +#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL 0x00ef +#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0 +#define regUVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x00f0 +#define regUVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x00f1 +#define regUVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x00f2 +#define regUVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x00f3 +#define regUVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x00f4 +#define regUVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x00f5 +#define regUVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x00f6 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x00f7 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x00f8 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x00f9 +#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL2 0x00fd +#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jmi5_uvd_jmi_dec +// base address: 0x1e480 +#define regUVD_JMI5_UVD_JPEG_DEC_PF_CTRL 0x0120 +#define regUVD_JMI5_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_CTRL 0x0121 +#define regUVD_JMI5_UVD_LMI_JRBC_CTRL_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_CTRL 0x0122 +#define regUVD_JMI5_UVD_LMI_JPEG_CTRL_BASE_IDX 0 +#define regUVD_JMI5_JPEG_LMI_DROP 0x0123 +#define regUVD_JMI5_JPEG_LMI_DROP_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_VMID 0x0124 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_RB_VMID 0x0125 +#define regUVD_JMI5_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_VMID 0x0126 +#define regUVD_JMI5_UVD_LMI_JPEG_VMID_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0127 +#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0128 +#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0129 +#define regUVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x012a +#define regUVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x012b +#define regUVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x012c +#define regUVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID 0x012d +#define 
regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define regUVD_JMI5_UVD_JMI_DEC_SWAP_CNTL 0x012e +#define regUVD_JMI5_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0 +#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL 0x012f +#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0 +#define regUVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0130 +#define regUVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0131 +#define regUVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0132 +#define regUVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0133 +#define regUVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0134 +#define regUVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0135 +#define regUVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0136 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0137 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0138 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0139 +#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL2 0x013d +#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jmi6_uvd_jmi_dec +// base address: 0x1e580 +#define regUVD_JMI6_UVD_JPEG_DEC_PF_CTRL 0x0160 +#define regUVD_JMI6_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_CTRL 0x0161 +#define regUVD_JMI6_UVD_LMI_JRBC_CTRL_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_CTRL 0x0162 +#define regUVD_JMI6_UVD_LMI_JPEG_CTRL_BASE_IDX 0 +#define regUVD_JMI6_JPEG_LMI_DROP 0x0163 +#define regUVD_JMI6_JPEG_LMI_DROP_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_VMID 0x0164 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_RB_VMID 0x0165 +#define regUVD_JMI6_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_VMID 0x0166 +#define regUVD_JMI6_UVD_LMI_JPEG_VMID_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0167 +#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0168 +#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0169 +#define regUVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x016a +#define regUVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x016b +#define regUVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x016c +#define regUVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID 0x016d +#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define regUVD_JMI6_UVD_JMI_DEC_SWAP_CNTL 0x016e +#define regUVD_JMI6_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0 +#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL 0x016f +#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0 +#define 
regUVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0170 +#define regUVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0171 +#define regUVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0172 +#define regUVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0173 +#define regUVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0174 +#define regUVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0175 +#define regUVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0176 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0177 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0178 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0179 +#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL2 0x017d +#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0 + + +// addressBlock: aid_uvd0_uvd_jmi7_uvd_jmi_dec +// base address: 0x1e680 +#define regUVD_JMI7_UVD_JPEG_DEC_PF_CTRL 0x01a0 +#define regUVD_JMI7_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_CTRL 0x01a1 +#define regUVD_JMI7_UVD_LMI_JRBC_CTRL_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JPEG_CTRL 0x01a2 +#define regUVD_JMI7_UVD_LMI_JPEG_CTRL_BASE_IDX 0 +#define regUVD_JMI7_JPEG_LMI_DROP 0x01a3 +#define regUVD_JMI7_JPEG_LMI_DROP_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_VMID 0x01a4 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_RB_VMID 0x01a5 +#define regUVD_JMI7_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JPEG_VMID 0x01a6 +#define regUVD_JMI7_UVD_LMI_JPEG_VMID_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x01a7 +#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x01a8 +#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x01a9 +#define regUVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x01aa +#define regUVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x01ab +#define regUVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x01ac +#define regUVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID 0x01ad +#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define regUVD_JMI7_UVD_JMI_DEC_SWAP_CNTL 0x01ae +#define regUVD_JMI7_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0 +#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL 0x01af +#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0 +#define regUVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x01b0 +#define regUVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x01b1 +#define regUVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 
+#define regUVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x01b2 +#define regUVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x01b3 +#define regUVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x01b4 +#define regUVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x01b5 +#define regUVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x01b6 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x01b7 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x01b8 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x01b9 +#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL2 0x01bd +#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0 + + +// addressBlock: uvdctxind +// base address: 0x0 +#define ixUVD_CGC_MEM_CTRL 0x0000 +#define ixUVD_CGC_CTRL2 0x0001 +#define ixUVD_CGC_MEM_DS_CTRL 0x0002 +#define ixUVD_CGC_MEM_SD_CTRL 0x0003 +#define ixUVD_SW_SCRATCH_00 0x0004 +#define ixUVD_SW_SCRATCH_01 0x0005 +#define ixUVD_SW_SCRATCH_02 0x0006 +#define ixUVD_SW_SCRATCH_03 0x0007 +#define ixUVD_SW_SCRATCH_04 0x0008 +#define ixUVD_SW_SCRATCH_05 0x0009 +#define ixUVD_SW_SCRATCH_06 0x000a +#define ixUVD_SW_SCRATCH_07 0x000b +#define ixUVD_SW_SCRATCH_08 0x000c +#define ixUVD_SW_SCRATCH_09 0x000d +#define ixUVD_SW_SCRATCH_10 0x000e +#define ixUVD_SW_SCRATCH_11 0x000f +#define ixUVD_SW_SCRATCH_12 0x0010 +#define ixUVD_SW_SCRATCH_13 0x0011 +#define ixUVD_SW_SCRATCH_14 0x0012 +#define ixUVD_SW_SCRATCH_15 0x0013 +#define ixUVD_IH_SEM_CTRL 0x001e + + +// addressBlock: lmi_adp_indirect +// base address: 0x0 +#define ixUVD_LMI_CRC0 0x0000 +#define ixUVD_LMI_CRC1 0x0001 +#define ixUVD_LMI_CRC2 0x0002 +#define ixUVD_LMI_CRC3 0x0003 +#define ixUVD_LMI_CRC10 0x000a +#define ixUVD_LMI_CRC11 0x000b +#define ixUVD_LMI_CRC12 0x000c +#define ixUVD_LMI_CRC13 0x000d +#define ixUVD_LMI_CRC14 0x000e +#define ixUVD_LMI_CRC15 0x000f +#define ixUVD_LMI_SWAP_CNTL2 0x0029 +#define ixUVD_MEMCHECK_SYS_INT_EN 0x0134 +#define ixUVD_MEMCHECK_SYS_INT_STAT 0x0135 +#define ixUVD_MEMCHECK_SYS_INT_ACK 0x0136 +#define ixUVD_MEMCHECK_VCPU_INT_EN 0x0137 +#define ixUVD_MEMCHECK_VCPU_INT_STAT 0x0138 +#define ixUVD_MEMCHECK_VCPU_INT_ACK 0x0139 +#define ixUVD_MEMCHECK2_SYS_INT_STAT 0x0140 +#define ixUVD_MEMCHECK2_SYS_INT_ACK 0x0141 +#define ixUVD_MEMCHECK2_VCPU_INT_STAT 0x0142 +#define ixUVD_MEMCHECK2_VCPU_INT_ACK 0x0143 + + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h new file mode 100644 index 000000000000..be643ea0e569 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h @@ -0,0 +1,10424 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _vcn_4_0_3_SH_MASK_HEADER +#define _vcn_4_0_3_SH_MASK_HEADER + + +// addressBlock: aid_uvd0_uvddec +//UVD_TOP_CTRL +#define UVD_TOP_CTRL__STANDARD__SHIFT 0x0 +#define UVD_TOP_CTRL__STD_VERSION__SHIFT 0x4 +#define UVD_TOP_CTRL__STANDARD_MASK 0x0000000FL +#define UVD_TOP_CTRL__STD_VERSION_MASK 0x00000010L +//UVD_CGC_GATE +#define UVD_CGC_GATE__SYS__SHIFT 0x0 +#define UVD_CGC_GATE__UDEC__SHIFT 0x1 +#define UVD_CGC_GATE__MPEG2__SHIFT 0x2 +#define UVD_CGC_GATE__REGS__SHIFT 0x3 +#define UVD_CGC_GATE__RBC__SHIFT 0x4 +#define UVD_CGC_GATE__LMI_MC__SHIFT 0x5 +#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x6 +#define UVD_CGC_GATE__IDCT__SHIFT 0x7 +#define UVD_CGC_GATE__MPRD__SHIFT 0x8 +#define UVD_CGC_GATE__MPC__SHIFT 0x9 +#define UVD_CGC_GATE__LBSI__SHIFT 0xa +#define UVD_CGC_GATE__LRBBM__SHIFT 0xb +#define UVD_CGC_GATE__UDEC_RE__SHIFT 0xc +#define UVD_CGC_GATE__UDEC_CM__SHIFT 0xd +#define UVD_CGC_GATE__UDEC_IT__SHIFT 0xe +#define UVD_CGC_GATE__UDEC_DB__SHIFT 0xf +#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x10 +#define UVD_CGC_GATE__WCB__SHIFT 0x11 +#define UVD_CGC_GATE__VCPU__SHIFT 0x12 +#define UVD_CGC_GATE__MMSCH__SHIFT 0x14 +#define UVD_CGC_GATE__LCM0__SHIFT 0x15 +#define UVD_CGC_GATE__LCM1__SHIFT 0x16 +#define UVD_CGC_GATE__MIF__SHIFT 0x17 +#define UVD_CGC_GATE__VREG__SHIFT 0x18 +#define UVD_CGC_GATE__PE__SHIFT 0x19 +#define UVD_CGC_GATE__PPU__SHIFT 0x1a +#define UVD_CGC_GATE__SYS_MASK 0x00000001L +#define UVD_CGC_GATE__UDEC_MASK 0x00000002L +#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L +#define UVD_CGC_GATE__REGS_MASK 0x00000008L +#define UVD_CGC_GATE__RBC_MASK 0x00000010L +#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L +#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L +#define UVD_CGC_GATE__IDCT_MASK 0x00000080L +#define UVD_CGC_GATE__MPRD_MASK 0x00000100L +#define UVD_CGC_GATE__MPC_MASK 0x00000200L +#define UVD_CGC_GATE__LBSI_MASK 0x00000400L +#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L +#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L +#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L +#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L +#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L +#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L +#define UVD_CGC_GATE__WCB_MASK 0x00020000L +#define UVD_CGC_GATE__VCPU_MASK 0x00040000L +#define UVD_CGC_GATE__MMSCH_MASK 0x00100000L +#define UVD_CGC_GATE__LCM0_MASK 0x00200000L +#define UVD_CGC_GATE__LCM1_MASK 0x00400000L +#define UVD_CGC_GATE__MIF_MASK 0x00800000L +#define 
UVD_CGC_GATE__VREG_MASK 0x01000000L +#define UVD_CGC_GATE__PE_MASK 0x02000000L +#define UVD_CGC_GATE__PPU_MASK 0x04000000L +//UVD_CGC_CTRL +#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0 +#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x2 +#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x6 +#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0xb +#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0xc +#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0xd +#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0xe +#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0xf +#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x10 +#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x11 +#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x12 +#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x13 +#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x14 +#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x15 +#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x16 +#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x17 +#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x18 +#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x19 +#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x1a +#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x1b +#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x1c +#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x1d +#define UVD_CGC_CTRL__MMSCH_MODE__SHIFT 0x1f +#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L +#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003CL +#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007C0L +#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L +#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L +#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L +#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L +#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L +#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L +#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L +#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L +#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L +#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L +#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L +#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L +#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L +#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L +#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L +#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L +#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L +#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L +#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L +#define UVD_CGC_CTRL__MMSCH_MODE_MASK 0x80000000L +//AVM_SUVD_CGC_GATE +#define AVM_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define AVM_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define AVM_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define AVM_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define AVM_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define AVM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define AVM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define AVM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define AVM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define AVM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define AVM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define AVM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define AVM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define AVM_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define AVM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define AVM_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define AVM_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define AVM_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define AVM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define AVM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define AVM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define 
AVM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define AVM_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define AVM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define AVM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define AVM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define AVM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define AVM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define AVM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define AVM_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define AVM_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define AVM_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define AVM_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define AVM_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define AVM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define AVM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define AVM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define AVM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define AVM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define AVM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define AVM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define AVM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define AVM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define AVM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define AVM_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define AVM_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define AVM_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define AVM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define AVM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define AVM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define AVM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define AVM_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define AVM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define AVM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define AVM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define AVM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define AVM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define AVM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//CDEFE_SUVD_CGC_GATE +#define CDEFE_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define CDEFE_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define CDEFE_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define CDEFE_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define CDEFE_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define CDEFE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define CDEFE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define CDEFE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define CDEFE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define CDEFE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define CDEFE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define CDEFE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define CDEFE_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define CDEFE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define CDEFE_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define CDEFE_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define CDEFE_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define CDEFE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define CDEFE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define CDEFE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define CDEFE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define CDEFE_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define CDEFE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define CDEFE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define CDEFE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define CDEFE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define 
CDEFE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define CDEFE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define CDEFE_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define CDEFE_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define CDEFE_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define CDEFE_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define CDEFE_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define CDEFE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define CDEFE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define CDEFE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define CDEFE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define CDEFE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define CDEFE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define CDEFE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define CDEFE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define CDEFE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define CDEFE_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define CDEFE_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define CDEFE_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define CDEFE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define CDEFE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define CDEFE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define CDEFE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define CDEFE_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define CDEFE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define CDEFE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define CDEFE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define CDEFE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define CDEFE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define CDEFE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//EFC_SUVD_CGC_GATE +#define EFC_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define EFC_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define EFC_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define EFC_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define EFC_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define EFC_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define EFC_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define EFC_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define EFC_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define EFC_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define EFC_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define EFC_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define EFC_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define EFC_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define EFC_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define EFC_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define EFC_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define EFC_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define EFC_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define EFC_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define EFC_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define EFC_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define EFC_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define EFC_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define EFC_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define EFC_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define EFC_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define EFC_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define EFC_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define EFC_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define EFC_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define EFC_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define EFC_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define 
EFC_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define EFC_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define EFC_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define EFC_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define EFC_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define EFC_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define EFC_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define EFC_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define EFC_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define EFC_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define EFC_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define EFC_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define EFC_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define EFC_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define EFC_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define EFC_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define EFC_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define EFC_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define EFC_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define EFC_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define EFC_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define EFC_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define EFC_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define EFC_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define EFC_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//ENT_SUVD_CGC_GATE +#define ENT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define ENT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define ENT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define ENT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define ENT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define ENT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define ENT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define ENT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define ENT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define ENT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define ENT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define ENT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define ENT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define ENT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define ENT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define ENT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define ENT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define ENT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define ENT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define ENT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define ENT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define ENT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define ENT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define ENT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define ENT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define ENT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define ENT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define ENT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define ENT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define ENT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define ENT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define ENT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define ENT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define ENT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define ENT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define ENT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define ENT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define ENT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define ENT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define ENT_SUVD_CGC_GATE__SCM_HEVC_MASK 
0x00000400L +#define ENT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define ENT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define ENT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define ENT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define ENT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define ENT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define ENT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define ENT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define ENT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define ENT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define ENT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define ENT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define ENT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define ENT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define ENT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define ENT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define ENT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define ENT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//IME_SUVD_CGC_GATE +#define IME_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define IME_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define IME_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define IME_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define IME_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define IME_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define IME_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define IME_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define IME_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define IME_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define IME_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define IME_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define IME_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define IME_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define IME_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define IME_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define IME_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define IME_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define IME_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define IME_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define IME_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define IME_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define IME_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define IME_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define IME_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define IME_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define IME_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define IME_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define IME_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define IME_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define IME_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define IME_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define IME_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define IME_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define IME_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define IME_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define IME_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define IME_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define IME_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define IME_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define IME_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define IME_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define IME_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define IME_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define IME_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define IME_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define IME_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define 
IME_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define IME_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define IME_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define IME_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define IME_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define IME_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define IME_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define IME_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define IME_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define IME_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define IME_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define IME_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define IME_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define IME_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//PPU_SUVD_CGC_GATE +#define PPU_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define PPU_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define PPU_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define PPU_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define PPU_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define PPU_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define PPU_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define PPU_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define PPU_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define PPU_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define PPU_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define PPU_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define PPU_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define PPU_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define PPU_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define PPU_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define PPU_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define PPU_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define PPU_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define PPU_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define PPU_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define PPU_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define PPU_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define PPU_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define PPU_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define PPU_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define PPU_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define PPU_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define PPU_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define PPU_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define PPU_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define PPU_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define PPU_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define PPU_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define PPU_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define PPU_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define PPU_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define PPU_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define PPU_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define PPU_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define PPU_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define PPU_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define PPU_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define PPU_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define PPU_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define PPU_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define PPU_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define PPU_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define PPU_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define 
PPU_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define PPU_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define PPU_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define PPU_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define PPU_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define PPU_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define PPU_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define PPU_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define PPU_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SAOE_SUVD_CGC_GATE +#define SAOE_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SAOE_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SAOE_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SAOE_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SAOE_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SAOE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SAOE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SAOE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SAOE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SAOE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SAOE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SAOE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SAOE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SAOE_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SAOE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SAOE_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SAOE_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SAOE_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SAOE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SAOE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SAOE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SAOE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SAOE_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SAOE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SAOE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SAOE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SAOE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SAOE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SAOE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SAOE_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SAOE_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SAOE_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SAOE_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SAOE_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SAOE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SAOE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SAOE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SAOE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SAOE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SAOE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SAOE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SAOE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SAOE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SAOE_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SAOE_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SAOE_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SAOE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SAOE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SAOE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SAOE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SAOE_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SAOE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SAOE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SAOE_SUVD_CGC_GATE__FBC_PCLK_MASK 
0x10000000L +#define SAOE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SAOE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SAOE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SCM_SUVD_CGC_GATE +#define SCM_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SCM_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SCM_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SCM_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SCM_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SCM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SCM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SCM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SCM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SCM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SCM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SCM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SCM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SCM_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SCM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SCM_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SCM_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SCM_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SCM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SCM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SCM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SCM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SCM_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SCM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SCM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SCM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SCM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SCM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SCM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SCM_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SCM_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SCM_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SCM_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SCM_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SCM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SCM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SCM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SCM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SCM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SCM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SCM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SCM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SCM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SCM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SCM_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SCM_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SCM_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SCM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SCM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SCM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SCM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SCM_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SCM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SCM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SCM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SCM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SCM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SCM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SDB_SUVD_CGC_GATE +#define SDB_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SDB_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SDB_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define 
SDB_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SDB_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SDB_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SDB_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SDB_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SDB_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SDB_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SDB_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SDB_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SDB_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SDB_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SDB_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SDB_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SDB_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SDB_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SDB_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SDB_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SDB_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SDB_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SDB_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SDB_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SDB_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SDB_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SDB_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SDB_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SDB_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SDB_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SDB_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SDB_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SDB_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SDB_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SDB_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SDB_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SDB_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SDB_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SDB_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SDB_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SDB_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SDB_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SDB_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SDB_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SDB_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SDB_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SDB_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SDB_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SDB_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SDB_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SDB_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SDB_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SDB_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SDB_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SDB_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SDB_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SDB_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SDB_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SIT0_NXT_SUVD_CGC_GATE +#define SIT0_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SIT0_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SIT0_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SIT0_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SIT0_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define 
SIT0_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SIT0_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SIT0_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SIT0_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SIT0_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SIT0_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SIT0_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SIT0_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SIT0_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SIT0_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SIT0_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SIT0_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SIT0_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SIT0_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SIT0_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SIT0_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SIT0_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SIT0_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SIT1_NXT_SUVD_CGC_GATE +#define SIT1_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SIT1_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SIT1_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SIT1_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SIT1_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define 
SIT1_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SIT1_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SIT1_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SIT1_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SIT1_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SIT1_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SIT1_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SIT1_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SIT1_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SIT1_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SIT1_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SIT1_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SIT1_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SIT1_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SIT1_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SIT1_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SIT1_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SIT1_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SIT2_NXT_SUVD_CGC_GATE +#define SIT2_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SIT2_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SIT2_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SIT2_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SIT2_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define 
SIT2_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SIT2_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SIT2_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SIT2_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SIT2_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SIT2_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SIT2_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SIT2_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SIT2_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SIT2_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SIT2_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SIT2_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SIT2_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SIT2_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SIT2_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SIT2_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SIT2_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SIT2_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SIT_SUVD_CGC_GATE +#define SIT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SIT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SIT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SIT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SIT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SIT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SIT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SIT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SIT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SIT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SIT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 
0xa +#define SIT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SIT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SIT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SIT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SIT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SIT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SIT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SIT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SIT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SIT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SIT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SIT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SIT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SIT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SIT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SIT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SIT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SIT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SIT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SIT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SIT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SIT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SIT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SIT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SIT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SIT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SIT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SIT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SIT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SIT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SIT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SIT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SIT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SIT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SIT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SIT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SIT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SIT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SIT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SIT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SIT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SIT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SIT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SIT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SIT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SIT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SIT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SMPA_SUVD_CGC_GATE +#define SMPA_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SMPA_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SMPA_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SMPA_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SMPA_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SMPA_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SMPA_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SMPA_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SMPA_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SMPA_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SMPA_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SMPA_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SMPA_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SMPA_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SMPA_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SMPA_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SMPA_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define 
SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SMPA_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SMPA_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SMPA_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SMPA_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SMPA_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SMPA_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SMPA_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SMPA_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SMPA_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SMPA_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SMPA_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SMPA_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SMPA_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SMPA_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SMPA_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SMPA_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SMPA_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SMPA_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SMPA_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SMPA_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SMPA_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SMPA_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SMPA_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SMPA_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SMPA_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SMPA_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SMPA_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SMPA_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SMPA_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SMPA_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SMPA_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SMPA_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SMPA_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SMPA_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SMPA_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SMPA_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SMPA_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SMPA_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SMPA_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SMPA_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SMP_SUVD_CGC_GATE +#define SMP_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SMP_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SMP_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SMP_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SMP_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SMP_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SMP_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SMP_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SMP_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SMP_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SMP_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SMP_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SMP_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SMP_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SMP_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SMP_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SMP_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SMP_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SMP_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SMP_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SMP_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define 
SMP_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SMP_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SMP_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SMP_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SMP_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SMP_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SMP_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SMP_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SMP_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SMP_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SMP_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SMP_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SMP_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SMP_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SMP_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SMP_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SMP_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SMP_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SMP_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SMP_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SMP_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SMP_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SMP_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SMP_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SMP_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SMP_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SMP_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SMP_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SMP_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SMP_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SMP_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SMP_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SMP_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SMP_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SMP_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SMP_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SMP_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SRE_SUVD_CGC_GATE +#define SRE_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SRE_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SRE_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SRE_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SRE_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SRE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SRE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SRE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SRE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SRE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SRE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SRE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SRE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SRE_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SRE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SRE_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SRE_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SRE_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SRE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SRE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SRE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SRE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SRE_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SRE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SRE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SRE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SRE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SRE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define 
SRE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SRE_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SRE_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SRE_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SRE_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SRE_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SRE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SRE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SRE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SRE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SRE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SRE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SRE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SRE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SRE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SRE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SRE_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SRE_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SRE_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SRE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SRE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SRE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SRE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SRE_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SRE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SRE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SRE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SRE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SRE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SRE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//UVD_MPBE0_SUVD_CGC_GATE +#define UVD_MPBE0_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define UVD_MPBE0_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define UVD_MPBE0_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define UVD_MPBE0_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define UVD_MPBE0_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define UVD_MPBE0_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define UVD_MPBE0_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define UVD_MPBE0_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define UVD_MPBE0_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define UVD_MPBE0_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define UVD_MPBE0_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define UVD_MPBE0_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define UVD_MPBE0_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define UVD_MPBE0_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define UVD_MPBE0_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define UVD_MPBE0_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define UVD_MPBE0_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define UVD_MPBE0_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define UVD_MPBE0_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define 
UVD_MPBE0_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define UVD_MPBE0_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define UVD_MPBE0_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define UVD_MPBE0_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define UVD_MPBE0_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define UVD_MPBE0_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define UVD_MPBE0_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define UVD_MPBE0_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define UVD_MPBE0_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define UVD_MPBE0_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define UVD_MPBE0_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define UVD_MPBE0_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define UVD_MPBE0_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define UVD_MPBE0_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define UVD_MPBE0_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define UVD_MPBE0_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define UVD_MPBE0_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define UVD_MPBE0_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define UVD_MPBE0_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//UVD_MPBE1_SUVD_CGC_GATE +#define UVD_MPBE1_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define UVD_MPBE1_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define UVD_MPBE1_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define UVD_MPBE1_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define UVD_MPBE1_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define UVD_MPBE1_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define UVD_MPBE1_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define UVD_MPBE1_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define UVD_MPBE1_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define UVD_MPBE1_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define UVD_MPBE1_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define UVD_MPBE1_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define UVD_MPBE1_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define UVD_MPBE1_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define UVD_MPBE1_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define UVD_MPBE1_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define UVD_MPBE1_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define UVD_MPBE1_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define UVD_MPBE1_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define 
UVD_MPBE1_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define UVD_MPBE1_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define UVD_MPBE1_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define UVD_MPBE1_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define UVD_MPBE1_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define UVD_MPBE1_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define UVD_MPBE1_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define UVD_MPBE1_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define UVD_MPBE1_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define UVD_MPBE1_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define UVD_MPBE1_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define UVD_MPBE1_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define UVD_MPBE1_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define UVD_MPBE1_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define UVD_MPBE1_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define UVD_MPBE1_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define UVD_MPBE1_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define UVD_MPBE1_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define UVD_MPBE1_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define UVD_MPBE1_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//UVD_SUVD_CGC_GATE +#define UVD_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define UVD_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define UVD_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define UVD_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define UVD_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define UVD_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define UVD_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define UVD_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define UVD_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define UVD_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define UVD_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define UVD_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define UVD_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define UVD_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define UVD_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define UVD_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define UVD_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define UVD_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define UVD_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define UVD_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define UVD_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define UVD_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define UVD_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define UVD_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define UVD_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define UVD_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define UVD_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define UVD_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define UVD_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define 
UVD_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define UVD_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define UVD_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define UVD_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define UVD_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define UVD_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define UVD_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define UVD_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define UVD_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define UVD_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define UVD_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define UVD_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define UVD_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define UVD_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define UVD_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define UVD_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define UVD_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define UVD_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define UVD_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define UVD_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define UVD_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define UVD_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//AVM_SUVD_CGC_GATE2 +#define AVM_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define AVM_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define AVM_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define AVM_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define AVM_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define AVM_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define AVM_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define AVM_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define AVM_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define AVM_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define AVM_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define AVM_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define AVM_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define AVM_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define AVM_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define AVM_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//CDEFE_SUVD_CGC_GATE2 +#define CDEFE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define CDEFE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define CDEFE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define CDEFE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define CDEFE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define CDEFE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define CDEFE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define CDEFE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define CDEFE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define CDEFE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define CDEFE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define 
CDEFE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define CDEFE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define CDEFE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define CDEFE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define CDEFE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//DBR_SUVD_CGC_GATE2 +#define DBR_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define DBR_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define DBR_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define DBR_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define DBR_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define DBR_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define DBR_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define DBR_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define DBR_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define DBR_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define DBR_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define DBR_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define DBR_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define DBR_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define DBR_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define DBR_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//ENT_SUVD_CGC_GATE2 +#define ENT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define ENT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define ENT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define ENT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define ENT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define ENT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define ENT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define ENT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define ENT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define ENT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define ENT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define ENT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define ENT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define ENT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define ENT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define ENT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//IME_SUVD_CGC_GATE2 +#define IME_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define IME_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define IME_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define IME_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define IME_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define IME_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define IME_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define IME_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa 
+#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define IME_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define IME_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define IME_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define IME_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define IME_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define IME_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define IME_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define IME_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//MPC1_SUVD_CGC_GATE2 +#define MPC1_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define MPC1_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define MPC1_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define MPC1_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define MPC1_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define MPC1_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define MPC1_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define MPC1_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define MPC1_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define MPC1_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define MPC1_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define MPC1_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define MPC1_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define MPC1_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define MPC1_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define MPC1_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define MPC1_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define MPC1_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define MPC1_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define MPC1_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define MPC1_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define MPC1_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define MPC1_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define MPC1_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SAOE_SUVD_CGC_GATE2 +#define SAOE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SAOE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SAOE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SAOE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SAOE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SAOE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SAOE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SAOE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SAOE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SAOE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SAOE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SAOE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SAOE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SAOE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SAOE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SAOE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SDB_SUVD_CGC_GATE2 +#define SDB_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SDB_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SDB_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SDB_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SDB_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define 
SDB_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SDB_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SDB_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SDB_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SDB_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SDB_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SDB_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SDB_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SDB_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SDB_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SDB_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SDB_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SIT0_NXT_SUVD_CGC_GATE2 +#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SIT0_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SIT0_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SIT1_NXT_SUVD_CGC_GATE2 +#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SIT1_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SIT1_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L 
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SIT2_NXT_SUVD_CGC_GATE2 +#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SIT2_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SIT2_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SIT_SUVD_CGC_GATE2 +#define SIT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SIT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SIT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SIT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SIT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SIT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SIT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SIT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SIT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SIT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SIT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SIT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SIT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SIT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SIT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SIT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SMPA_SUVD_CGC_GATE2 +#define SMPA_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SMPA_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SMPA_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SMPA_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SMPA_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SMPA_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SMPA_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SMPA_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SMPA_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SMPA_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define 
SMPA_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SMPA_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SMPA_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SMPA_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SMPA_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SMPA_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SMP_SUVD_CGC_GATE2 +#define SMP_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SMP_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SMP_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SMP_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SMP_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SMP_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SMP_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SMP_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SMP_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SMP_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SMP_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SMP_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SMP_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SMP_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SMP_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SMP_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SRE_SUVD_CGC_GATE2 +#define SRE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SRE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SRE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SRE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SRE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SRE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SRE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SRE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SRE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SRE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SRE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SRE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SRE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SRE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SRE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SRE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//UVD_MPBE0_SUVD_CGC_GATE2 +#define UVD_MPBE0_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define UVD_MPBE0_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define UVD_MPBE0_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define UVD_MPBE0_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define UVD_MPBE0_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define UVD_MPBE0_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define UVD_MPBE0_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define UVD_MPBE0_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define 
UVD_MPBE0_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define UVD_MPBE0_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define UVD_MPBE0_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define UVD_MPBE0_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define UVD_MPBE0_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define UVD_MPBE0_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +//UVD_MPBE1_SUVD_CGC_GATE2 +#define UVD_MPBE1_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define UVD_MPBE1_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define UVD_MPBE1_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define UVD_MPBE1_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define UVD_MPBE1_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define UVD_MPBE1_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define UVD_MPBE1_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define UVD_MPBE1_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define UVD_MPBE1_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define UVD_MPBE1_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define UVD_MPBE1_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define UVD_MPBE1_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define UVD_MPBE1_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define UVD_MPBE1_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +//UVD_SUVD_CGC_GATE2 +#define UVD_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define UVD_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define UVD_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define UVD_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define UVD_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define UVD_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define UVD_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define UVD_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define UVD_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define UVD_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define UVD_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define UVD_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define UVD_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define UVD_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//AVM_SUVD_CGC_CTRL +#define AVM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define AVM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define AVM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define AVM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define AVM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define AVM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define AVM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define AVM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define AVM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define AVM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define AVM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define AVM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define AVM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define AVM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define AVM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define AVM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define AVM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define 
AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define AVM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define AVM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define AVM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define AVM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define AVM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define AVM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define AVM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define AVM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define AVM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define AVM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define AVM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define AVM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define AVM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define AVM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define AVM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define AVM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define AVM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define AVM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define AVM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define AVM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define AVM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define AVM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define AVM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//CDEFE_SUVD_CGC_CTRL +#define CDEFE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define CDEFE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define CDEFE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define CDEFE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define CDEFE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define CDEFE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define CDEFE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define CDEFE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define CDEFE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define CDEFE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define CDEFE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define CDEFE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define CDEFE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define CDEFE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define CDEFE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define CDEFE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define CDEFE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define CDEFE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define 
CDEFE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define CDEFE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define CDEFE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define CDEFE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//DBR_SUVD_CGC_CTRL +#define DBR_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define DBR_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define DBR_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define DBR_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define DBR_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define DBR_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define DBR_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define DBR_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define DBR_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define DBR_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define DBR_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define DBR_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define DBR_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define DBR_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define DBR_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define DBR_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define DBR_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define DBR_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define DBR_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define DBR_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define DBR_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define DBR_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define DBR_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define DBR_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define DBR_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define DBR_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define DBR_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define DBR_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define DBR_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define DBR_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define DBR_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define DBR_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define DBR_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define DBR_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define DBR_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define DBR_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define DBR_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define 
DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define DBR_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define DBR_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define DBR_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//EFC_SUVD_CGC_CTRL +#define EFC_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define EFC_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define EFC_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define EFC_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define EFC_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define EFC_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define EFC_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define EFC_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define EFC_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define EFC_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define EFC_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define EFC_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define EFC_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define EFC_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define EFC_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define EFC_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define EFC_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define EFC_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define EFC_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define EFC_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define EFC_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define EFC_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define EFC_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define EFC_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define EFC_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define EFC_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define EFC_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define EFC_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define EFC_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define EFC_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define EFC_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define EFC_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define EFC_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define EFC_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define EFC_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define EFC_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define EFC_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define EFC_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define EFC_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define EFC_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//ENT_SUVD_CGC_CTRL +#define ENT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define ENT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define ENT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define ENT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define ENT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define ENT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define ENT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define ENT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define ENT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define ENT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa 
+#define ENT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define ENT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define ENT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define ENT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define ENT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define ENT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define ENT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define ENT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define ENT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define ENT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define ENT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define ENT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define ENT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define ENT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define ENT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define ENT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define ENT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define ENT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define ENT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define ENT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define ENT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define ENT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define ENT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define ENT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define ENT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define ENT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define ENT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define ENT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define ENT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define ENT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//IME_SUVD_CGC_CTRL +#define IME_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define IME_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define IME_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define IME_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define IME_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define IME_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define IME_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define IME_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define IME_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define IME_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define IME_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define IME_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define IME_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define IME_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define IME_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define IME_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define IME_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define IME_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define IME_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define IME_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define IME_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 
0x1e +#define IME_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define IME_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define IME_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define IME_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define IME_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define IME_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define IME_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define IME_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define IME_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define IME_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define IME_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define IME_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define IME_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define IME_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define IME_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define IME_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define IME_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define IME_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define IME_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define IME_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define IME_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//MPC1_SUVD_CGC_CTRL +#define MPC1_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define MPC1_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define MPC1_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define MPC1_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define MPC1_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define MPC1_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define MPC1_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define MPC1_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define MPC1_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define MPC1_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define MPC1_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define MPC1_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define MPC1_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define MPC1_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define MPC1_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define MPC1_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define MPC1_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define MPC1_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define MPC1_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define MPC1_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define MPC1_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define MPC1_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define MPC1_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define MPC1_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define MPC1_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define MPC1_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define MPC1_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define MPC1_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define MPC1_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define MPC1_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define MPC1_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define MPC1_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define MPC1_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define MPC1_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define MPC1_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define MPC1_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define MPC1_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define MPC1_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define MPC1_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define 
MPC1_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define MPC1_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define MPC1_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define MPC1_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define MPC1_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define MPC1_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define MPC1_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define MPC1_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define MPC1_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define MPC1_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define MPC1_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define MPC1_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define MPC1_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//PPU_SUVD_CGC_CTRL +#define PPU_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define PPU_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define PPU_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define PPU_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define PPU_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define PPU_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define PPU_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define PPU_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define PPU_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define PPU_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define PPU_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define PPU_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define PPU_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define PPU_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define PPU_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define PPU_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define PPU_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define PPU_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define PPU_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define PPU_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define PPU_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define PPU_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define PPU_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define PPU_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define PPU_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define PPU_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define PPU_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define PPU_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define PPU_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define PPU_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define PPU_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define PPU_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define PPU_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define PPU_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define PPU_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define PPU_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define PPU_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define PPU_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define PPU_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define PPU_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SAOE_SUVD_CGC_CTRL 
+#define SAOE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SAOE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SAOE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SAOE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SAOE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SAOE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SAOE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SAOE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SAOE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SAOE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SAOE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SAOE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SAOE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SAOE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SAOE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SAOE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SAOE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SAOE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SAOE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SAOE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SAOE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SAOE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SAOE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SAOE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SAOE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SAOE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SAOE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SAOE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SAOE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SAOE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SCM_SUVD_CGC_CTRL +#define SCM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SCM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SCM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SCM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SCM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SCM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SCM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SCM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SCM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SCM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SCM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SCM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SCM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define 
SCM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SCM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SCM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SCM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SCM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SCM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SCM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SCM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SCM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SCM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SCM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SCM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SCM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SCM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SCM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SCM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SCM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SCM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SCM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SCM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SCM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SCM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SCM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SCM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SCM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SCM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SCM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SDB_SUVD_CGC_CTRL +#define SDB_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SDB_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SDB_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SDB_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SDB_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SDB_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SDB_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SDB_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SDB_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SDB_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SDB_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SDB_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SDB_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SDB_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SDB_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SDB_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SDB_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SDB_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SDB_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SDB_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SDB_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SDB_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SDB_SUVD_CGC_CTRL__SMP_MODE_MASK 
0x00000004L +#define SDB_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SDB_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SDB_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SDB_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SDB_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SDB_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SDB_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SDB_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SDB_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SDB_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SDB_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SDB_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SDB_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SDB_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SDB_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SDB_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SDB_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SIT0_NXT_SUVD_CGC_CTRL +#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define 
SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SIT1_NXT_SUVD_CGC_CTRL +#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define 
SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SIT2_NXT_SUVD_CGC_CTRL +#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SIT_SUVD_CGC_CTRL +#define SIT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SIT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SIT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define 
SIT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SIT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SIT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SIT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SIT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SIT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SIT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SIT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SIT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SIT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SIT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SIT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SIT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SIT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SIT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SIT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SIT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SIT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SIT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SIT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SIT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SIT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SIT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SIT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SIT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SIT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SIT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SIT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SIT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SIT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SIT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SIT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SIT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SIT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SIT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SIT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SIT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SMPA_SUVD_CGC_CTRL +#define SMPA_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SMPA_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SMPA_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SMPA_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SMPA_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SMPA_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SMPA_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SMPA_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SMPA_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SMPA_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SMPA_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SMPA_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SMPA_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define 
SMPA_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SMPA_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SMPA_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SMPA_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SMPA_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SMPA_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SMPA_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SMPA_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SMPA_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SMPA_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SMPA_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SMPA_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SMPA_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SMPA_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SMPA_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SMPA_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SMPA_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SMPA_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SMP_SUVD_CGC_CTRL +#define SMP_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SMP_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SMP_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SMP_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SMP_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SMP_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SMP_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SMP_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SMP_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SMP_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SMP_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SMP_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SMP_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SMP_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SMP_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SMP_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SMP_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SMP_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SMP_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SMP_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SMP_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SMP_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SMP_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SMP_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SMP_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SMP_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L 
+#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SMP_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SMP_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SMP_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SMP_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SMP_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SMP_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SMP_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SMP_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SMP_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SMP_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SMP_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SMP_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SMP_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SMP_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SRE_SUVD_CGC_CTRL +#define SRE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SRE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SRE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SRE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SRE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SRE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SRE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SRE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SRE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SRE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SRE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SRE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SRE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SRE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SRE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SRE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SRE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SRE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SRE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SRE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SRE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SRE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SRE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SRE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SRE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SRE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SRE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SRE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SRE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SRE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SRE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SRE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SRE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SRE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SRE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SRE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SRE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 
0x00080000L +#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SRE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SRE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SRE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//UVD_MPBE0_SUVD_CGC_CTRL +#define UVD_MPBE0_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define UVD_MPBE0_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define UVD_MPBE0_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define UVD_MPBE0_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define UVD_MPBE0_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define UVD_MPBE0_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define UVD_MPBE0_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define UVD_MPBE0_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define UVD_MPBE0_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define UVD_MPBE0_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define UVD_MPBE0_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define UVD_MPBE0_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define UVD_MPBE0_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define UVD_MPBE0_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define UVD_MPBE0_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define UVD_MPBE0_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define UVD_MPBE0_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define UVD_MPBE0_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define UVD_MPBE0_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define UVD_MPBE0_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define UVD_MPBE0_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define UVD_MPBE0_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define UVD_MPBE0_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define UVD_MPBE0_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define UVD_MPBE0_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define UVD_MPBE0_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define UVD_MPBE0_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define UVD_MPBE0_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define UVD_MPBE0_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define UVD_MPBE0_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define UVD_MPBE0_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define UVD_MPBE0_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define UVD_MPBE0_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define UVD_MPBE0_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define UVD_MPBE0_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define UVD_MPBE0_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define UVD_MPBE0_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define UVD_MPBE0_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define UVD_MPBE0_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define UVD_MPBE0_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define UVD_MPBE0_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define UVD_MPBE0_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//UVD_MPBE1_SUVD_CGC_CTRL +#define UVD_MPBE1_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define UVD_MPBE1_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define UVD_MPBE1_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define UVD_MPBE1_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define UVD_MPBE1_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define UVD_MPBE1_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define UVD_MPBE1_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define UVD_MPBE1_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define UVD_MPBE1_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define UVD_MPBE1_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define UVD_MPBE1_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define UVD_MPBE1_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define 
UVD_MPBE1_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define UVD_MPBE1_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define UVD_MPBE1_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define UVD_MPBE1_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define UVD_MPBE1_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define UVD_MPBE1_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define UVD_MPBE1_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define UVD_MPBE1_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define UVD_MPBE1_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define UVD_MPBE1_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define UVD_MPBE1_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_SUVD_CGC_CTRL
+#define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define UVD_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define UVD_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define UVD_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define UVD_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define UVD_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define UVD_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define UVD_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define UVD_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define UVD_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define UVD_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define UVD_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define UVD_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define UVD_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define UVD_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define UVD_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define UVD_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define UVD_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define UVD_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define UVD_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define UVD_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_CGC_CTRL3
+#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY__SHIFT 0x0
+#define UVD_CGC_CTRL3__LCM0_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL3__LCM1_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL3__MIF_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL3__VREG_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL3__PE_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL3__PPU_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY_MASK 0x000000FFL
+#define UVD_CGC_CTRL3__LCM0_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL3__LCM1_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL3__MIF_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL3__VREG_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL3__PE_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL3__PPU_MODE_MASK 0x00010000L
+//UVD_GPCOM_VCPU_DATA0
+#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_VCPU_DATA1
+#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_CMD
+#define UVD_GPCOM_SYS_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_SYS_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_SYS_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_SYS_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE_MASK 0x80000000L
+//UVD_GPCOM_SYS_DATA0
+#define UVD_GPCOM_SYS_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_DATA1
+#define UVD_GPCOM_SYS_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_VCPU_INT_EN
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN__SHIFT 0x3
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN__SHIFT 0x4
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN__SHIFT 0x5
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN__SHIFT 0x7
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN__SHIFT 0x9
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN__SHIFT 0xa
+#define UVD_VCPU_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_VCPU_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe
+#define UVD_VCPU_INT_EN__SUVD_EN__SHIFT 0xf
+#define UVD_VCPU_INT_EN__RPTR_WR_EN__SHIFT 0x10
+#define UVD_VCPU_INT_EN__JOB_START_EN__SHIFT 0x11
+#define UVD_VCPU_INT_EN__NJ_PF_EN__SHIFT 0x12
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_VCPU_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_VCPU_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_VCPU_INT_EN__AVM_INT_EN__SHIFT 0x1a
+#define UVD_VCPU_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN__SHIFT 0x1e
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN__SHIFT 0x1f
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN_MASK 0x00000008L
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN_MASK 0x00000010L
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN_MASK 0x00000020L
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN_MASK 0x00000080L
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN_MASK 0x00000200L
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN_MASK 0x00000400L
+#define UVD_VCPU_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_VCPU_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L
+#define UVD_VCPU_INT_EN__SUVD_EN_MASK 0x00008000L
+#define UVD_VCPU_INT_EN__RPTR_WR_EN_MASK 0x00010000L
+#define UVD_VCPU_INT_EN__JOB_START_EN_MASK 0x00020000L
+#define UVD_VCPU_INT_EN__NJ_PF_EN_MASK 0x00040000L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_VCPU_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_VCPU_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_VCPU_INT_EN__AVM_INT_EN_MASK 0x04000000L
+#define UVD_VCPU_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN_MASK 0x40000000L
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN_MASK 0x80000000L
+//UVD_VCPU_INT_STATUS
+#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT__SHIFT 0x3
+#define UVD_VCPU_INT_STATUS__SW_RB1_INT__SHIFT 0x4
+#define UVD_VCPU_INT_STATUS__SW_RB2_INT__SHIFT 0x5
+#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_VCPU_INT_STATUS__SW_RB3_INT__SHIFT 0x7
+#define UVD_VCPU_INT_STATUS__SW_RB4_INT__SHIFT 0x9
+#define UVD_VCPU_INT_STATUS__SW_RB5_INT__SHIFT 0xa
+#define UVD_VCPU_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_VCPU_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe
+#define UVD_VCPU_INT_STATUS__SUVD_INT__SHIFT 0xf
+#define UVD_VCPU_INT_STATUS__RPTR_WR_INT__SHIFT 0x10
+#define UVD_VCPU_INT_STATUS__JOB_START_INT__SHIFT 0x11
+#define UVD_VCPU_INT_STATUS__NJ_PF_INT__SHIFT 0x12
+#define UVD_VCPU_INT_STATUS__GPCOM_INT__SHIFT 0x14
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_VCPU_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_VCPU_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_VCPU_INT_STATUS__AVM_INT__SHIFT 0x1a
+#define UVD_VCPU_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_VCPU_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT__SHIFT 0x1e
+#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT__SHIFT 0x1f
+#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT_MASK 0x00000008L
+#define UVD_VCPU_INT_STATUS__SW_RB1_INT_MASK 0x00000010L
+#define UVD_VCPU_INT_STATUS__SW_RB2_INT_MASK 0x00000020L
+#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_VCPU_INT_STATUS__SW_RB3_INT_MASK 0x00000080L
+#define UVD_VCPU_INT_STATUS__SW_RB4_INT_MASK 0x00000200L
+#define UVD_VCPU_INT_STATUS__SW_RB5_INT_MASK 0x00000400L
+#define UVD_VCPU_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_VCPU_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L
+#define UVD_VCPU_INT_STATUS__SUVD_INT_MASK 0x00008000L
+#define UVD_VCPU_INT_STATUS__RPTR_WR_INT_MASK 0x00010000L
+#define UVD_VCPU_INT_STATUS__JOB_START_INT_MASK 0x00020000L
+#define UVD_VCPU_INT_STATUS__NJ_PF_INT_MASK 0x00040000L
+#define UVD_VCPU_INT_STATUS__GPCOM_INT_MASK 0x00100000L
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_VCPU_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_VCPU_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_VCPU_INT_STATUS__AVM_INT_MASK 0x04000000L
+#define UVD_VCPU_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_VCPU_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT_MASK 0x40000000L
+#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT_MASK 0x80000000L
+//UVD_VCPU_INT_ACK
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK__SHIFT 0x3
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK__SHIFT 0x4
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK__SHIFT 0x5
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK__SHIFT 0x7
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK__SHIFT 0x9
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK__SHIFT 0xa
+#define UVD_VCPU_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_VCPU_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe
+#define UVD_VCPU_INT_ACK__SUVD_ACK__SHIFT 0xf
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK__SHIFT 0x10
+#define UVD_VCPU_INT_ACK__JOB_START_ACK__SHIFT 0x11
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK__SHIFT 0x12
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_VCPU_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_VCPU_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK__SHIFT 0x1a
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK__SHIFT 0x1e
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK__SHIFT 0x1f
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK_MASK 0x00000008L
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK_MASK 0x00000010L
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK_MASK 0x00000020L
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK_MASK 0x00000080L
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK_MASK 0x00000200L
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK_MASK 0x00000400L
+#define UVD_VCPU_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_VCPU_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L
+#define UVD_VCPU_INT_ACK__SUVD_ACK_MASK 0x00008000L
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK_MASK 0x00010000L
+#define UVD_VCPU_INT_ACK__JOB_START_ACK_MASK 0x00020000L
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK_MASK 0x00040000L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_VCPU_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_VCPU_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK_MASK 0x04000000L
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK_MASK 0x40000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK_MASK 0x80000000L
+//UVD_VCPU_INT_ROUTE
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG__SHIFT 0x0
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM__SHIFT 0x2
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG_MASK 0x00000001L
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM_MASK 0x00000004L
+//UVD_DRV_FW_MSG
+#define UVD_DRV_FW_MSG__MSG__SHIFT 0x0
+#define UVD_DRV_FW_MSG__MSG_MASK 0xFFFFFFFFL
+//UVD_FW_DRV_MSG_ACK
+#define UVD_FW_DRV_MSG_ACK__ACK__SHIFT 0x0
+#define UVD_FW_DRV_MSG_ACK__ACK_MASK 0x00000001L
+//UVD_SUVD_INT_EN
+#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN__SHIFT 0x0
+#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN__SHIFT 0xb
+#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN__SHIFT 0xc
+#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN__SHIFT 0x11
+#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN__SHIFT 0x12
+#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN__SHIFT 0x17
+#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN__SHIFT 0x18
+#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN__SHIFT 0x1d
+#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN__SHIFT 0x1e
+#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN_MASK 0x0000001FL
+#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN_MASK 0x00000020L
+#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN_MASK 0x000007C0L
+#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN_MASK 0x00000800L
+#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN_MASK 0x0001F000L
+#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN_MASK 0x00020000L
+#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN_MASK 0x007C0000L
+#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN_MASK 0x00800000L
+#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN_MASK 0x1F000000L
+#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN_MASK 0x20000000L
+#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN_MASK 0x40000000L
+//UVD_SUVD_INT_STATUS
+#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT__SHIFT 0x0
+#define UVD_SUVD_INT_STATUS__SRE_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS__SIT_ERR_INT__SHIFT 0xb
+#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT__SHIFT 0xc
+#define UVD_SUVD_INT_STATUS__SMP_ERR_INT__SHIFT 0x11
+#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT__SHIFT 0x12
+#define UVD_SUVD_INT_STATUS__SCM_ERR_INT__SHIFT 0x17
+#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT__SHIFT 0x18
+#define UVD_SUVD_INT_STATUS__SDB_ERR_INT__SHIFT 0x1d
+#define UVD_SUVD_INT_STATUS__FBC_ERR_INT__SHIFT 0x1e
+#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT_MASK 0x0000001FL
+#define UVD_SUVD_INT_STATUS__SRE_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS__SIT_ERR_INT_MASK 0x00000800L
+#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT_MASK 0x0001F000L
+#define UVD_SUVD_INT_STATUS__SMP_ERR_INT_MASK 0x00020000L
+#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT_MASK 0x007C0000L
+#define UVD_SUVD_INT_STATUS__SCM_ERR_INT_MASK 0x00800000L
+#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT_MASK 0x1F000000L
+#define UVD_SUVD_INT_STATUS__SDB_ERR_INT_MASK 0x20000000L
+#define UVD_SUVD_INT_STATUS__FBC_ERR_INT_MASK 0x40000000L
+//UVD_SUVD_INT_ACK
+#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK__SHIFT 0x0
+#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK__SHIFT 0x5
+#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK__SHIFT 0x6
+#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK__SHIFT 0xb
+#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK__SHIFT 0xc
+#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK__SHIFT 0x11
+#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK__SHIFT 0x12
+#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK__SHIFT 0x17
+#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK__SHIFT 0x18
+#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK__SHIFT 0x1d
+#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK__SHIFT 0x1e
+#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK_MASK 0x0000001FL
+#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK_MASK 0x00000020L
+#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK_MASK 0x000007C0L
+#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK_MASK 0x00000800L
+#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK_MASK 0x0001F000L
+#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK_MASK 0x00020000L
+#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK_MASK 0x007C0000L
+#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK_MASK 0x00800000L
+#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK_MASK 0x1F000000L
+#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK_MASK 0x20000000L
+#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK_MASK 0x40000000L
+//UVD_ENC_VCPU_INT_EN
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_STATUS
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_ACK
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK_MASK 0x00000004L
+//UVD_MASTINT_EN
+#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1
+#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x2
+#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
+#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x00FFFFF0L
+//UVD_SYS_INT_EN
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_SYS_INT_EN__CXW_WR_EN__SHIFT 0x3
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_SYS_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_SYS_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe
+#define UVD_SYS_INT_EN__SUVD_EN__SHIFT 0xf
+#define UVD_SYS_INT_EN__JOB_DONE_EN__SHIFT 0x10
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_SYS_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_SYS_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN__SHIFT 0x1a
+#define UVD_SYS_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_SYS_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_SYS_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_SYS_INT_EN__AVM_INT_EN__SHIFT 0x1f
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_SYS_INT_EN__CXW_WR_EN_MASK 0x00000008L
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_SYS_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_SYS_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L
+#define UVD_SYS_INT_EN__SUVD_EN_MASK 0x00008000L
+#define UVD_SYS_INT_EN__JOB_DONE_EN_MASK 0x00010000L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_SYS_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_SYS_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK 0x04000000L
+#define UVD_SYS_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_SYS_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_SYS_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_SYS_INT_EN__AVM_INT_EN_MASK 0x80000000L
+//UVD_SYS_INT_STATUS
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_SYS_INT_STATUS__CXW_WR_INT__SHIFT 0x3
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_SYS_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_SYS_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe
+#define UVD_SYS_INT_STATUS__SUVD_INT__SHIFT 0xf
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT__SHIFT 0x10
+#define UVD_SYS_INT_STATUS__GPCOM_INT__SHIFT 0x12
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_SYS_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_SYS_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_SYS_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_SYS_INT_STATUS__RASCNTL_VCPU_VCODEC_INT__SHIFT 0x1e
+#define UVD_SYS_INT_STATUS__AVM_INT__SHIFT 0x1f
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_SYS_INT_STATUS__CXW_WR_INT_MASK 0x00000008L
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_SYS_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_SYS_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L
+#define UVD_SYS_INT_STATUS__SUVD_INT_MASK 0x00008000L
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT_MASK 0x00010000L
+#define UVD_SYS_INT_STATUS__GPCOM_INT_MASK 0x00040000L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_SYS_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_SYS_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_SYS_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_SYS_INT_STATUS__RASCNTL_VCPU_VCODEC_INT_MASK 0x40000000L
+#define UVD_SYS_INT_STATUS__AVM_INT_MASK 0x80000000L
+//UVD_SYS_INT_ACK
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_SYS_INT_ACK__CXW_WR_ACK__SHIFT 0x3
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_SYS_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_SYS_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe
+#define UVD_SYS_INT_ACK__SUVD_ACK__SHIFT 0xf
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK__SHIFT 0x10
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_SYS_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_SYS_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_SYS_INT_ACK__RASCNTL_VCPU_VCODEC_ACK__SHIFT 0x1e
+#define UVD_SYS_INT_ACK__AVM_INT_ACK__SHIFT 0x1f
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_SYS_INT_ACK__CXW_WR_ACK_MASK 0x00000008L
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_SYS_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_SYS_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L
+#define UVD_SYS_INT_ACK__SUVD_ACK_MASK 0x00008000L
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK_MASK 0x00010000L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_SYS_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_SYS_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_SYS_INT_ACK__RASCNTL_VCPU_VCODEC_ACK_MASK 0x40000000L
+#define UVD_SYS_INT_ACK__AVM_INT_ACK_MASK 0x80000000L
+//UVD_JOB_DONE
+#define UVD_JOB_DONE__JOB_DONE__SHIFT 0x0
+#define UVD_JOB_DONE__JOB_DONE_MASK 0x00000003L
+//UVD_CBUF_ID
+#define UVD_CBUF_ID__CBUF_ID__SHIFT 0x0
+#define UVD_CBUF_ID__CBUF_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID
+#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x0
+#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID2
+#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0
+#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL
+//UVD_NO_OP
+#define UVD_NO_OP__NO_OP__SHIFT 0x0
+#define UVD_NO_OP__NO_OP_MASK 0xFFFFFFFFL
+//UVD_RB_BASE_LO
+#define UVD_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI
+#define UVD_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE
+#define UVD_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO2
+#define UVD_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO2__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI2
+#define UVD_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI2__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE2
+#define UVD_RB_SIZE2__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE2__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO3
+#define UVD_RB_BASE_LO3__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO3__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI3
+#define UVD_RB_BASE_HI3__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI3__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE3
+#define UVD_RB_SIZE3__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE3__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO4
+#define UVD_RB_BASE_LO4__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO4__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI4
+#define UVD_RB_BASE_HI4__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI4__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE4
+#define UVD_RB_SIZE4__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE4__RB_SIZE_MASK 0x007FFFF0L
+//UVD_OUT_RB_BASE_LO
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_OUT_RB_BASE_HI
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_OUT_RB_SIZE
+#define UVD_OUT_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_OUT_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_IOV_ACTIVE_FCN_ID
+#define UVD_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define UVD_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define UVD_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000003FL
+#define UVD_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//UVD_IOV_MAILBOX
+#define UVD_IOV_MAILBOX__MAILBOX__SHIFT 0x0
+#define UVD_IOV_MAILBOX__MAILBOX_MASK 0xFFFFFFFFL
+//UVD_IOV_MAILBOX_RESP
+#define UVD_IOV_MAILBOX_RESP__RESP__SHIFT 0x0
+#define UVD_IOV_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+//UVD_RB_ARB_CTRL
+#define UVD_RB_ARB_CTRL__SRBM_DROP__SHIFT 0x0
+#define UVD_RB_ARB_CTRL__SRBM_DIS__SHIFT 0x1
+#define UVD_RB_ARB_CTRL__VCPU_DROP__SHIFT 0x2
+#define UVD_RB_ARB_CTRL__VCPU_DIS__SHIFT 0x3
+#define UVD_RB_ARB_CTRL__RBC_DROP__SHIFT 0x4
+#define UVD_RB_ARB_CTRL__RBC_DIS__SHIFT 0x5
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP__SHIFT 0x6
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS__SHIFT 0x7
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN__SHIFT 0x8
+#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN__SHIFT 0x9
+#define UVD_RB_ARB_CTRL__SRBM_DROP_MASK 0x00000001L
+#define UVD_RB_ARB_CTRL__SRBM_DIS_MASK 0x00000002L
+#define UVD_RB_ARB_CTRL__VCPU_DROP_MASK 0x00000004L
+#define UVD_RB_ARB_CTRL__VCPU_DIS_MASK 0x00000008L
+#define UVD_RB_ARB_CTRL__RBC_DROP_MASK 0x00000010L
+#define UVD_RB_ARB_CTRL__RBC_DIS_MASK 0x00000020L
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP_MASK 0x00000040L
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS_MASK 0x00000080L
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN_MASK 0x00000100L
+#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN_MASK 0x00000200L
+//UVD_CTX_INDEX
+#define UVD_CTX_INDEX__INDEX__SHIFT 0x0
+#define UVD_CTX_INDEX__INDEX_MASK 0x000001FFL
+//UVD_CTX_DATA
+#define UVD_CTX_DATA__DATA__SHIFT 0x0
+#define UVD_CTX_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_CXW_WR
+#define UVD_CXW_WR__DAT__SHIFT 0x0
+#define UVD_CXW_WR__STAT__SHIFT 0x1f
+#define UVD_CXW_WR__DAT_MASK 0x0FFFFFFFL
+#define UVD_CXW_WR__STAT_MASK 0x80000000L
+//UVD_CXW_WR_INT_ID
+#define UVD_CXW_WR_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_ID__ID_MASK 0x000000FFL
+//UVD_CXW_WR_INT_CTX_ID
+#define UVD_CXW_WR_INT_CTX_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_CTX_ID__ID_MASK 0x0FFFFFFFL
+//UVD_CXW_INT_ID
+#define UVD_CXW_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_INT_ID__ID_MASK 0x000000FFL
+//UVD_MPEG2_ERROR
+#define UVD_MPEG2_ERROR__STATUS__SHIFT 0x0
+#define UVD_MPEG2_ERROR__STATUS_MASK 0xFFFFFFFFL
+//UVD_YBASE
+#define UVD_YBASE__DUM__SHIFT 0x0
+#define UVD_YBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_UVBASE
+#define UVD_UVBASE__DUM__SHIFT 0x0
+#define UVD_UVBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_PITCH
+#define UVD_PITCH__DUM__SHIFT 0x0
+#define UVD_PITCH__DUM_MASK 0xFFFFFFFFL
+//UVD_WIDTH
+#define UVD_WIDTH__DUM__SHIFT 0x0
+#define UVD_WIDTH__DUM_MASK 0xFFFFFFFFL
+//UVD_HEIGHT
+#define UVD_HEIGHT__DUM__SHIFT 0x0
+#define UVD_HEIGHT__DUM_MASK 0xFFFFFFFFL
+//UVD_PICCOUNT
+#define UVD_PICCOUNT__DUM__SHIFT 0x0
+#define UVD_PICCOUNT__DUM_MASK 0xFFFFFFFFL
+//UVD_MPRD_INITIAL_XY
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X__SHIFT 0x0
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y__SHIFT 0x10
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X_MASK 0x00000FFFL
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y_MASK 0x0FFF0000L
+//UVD_MPEG2_CTRL
+#define UVD_MPEG2_CTRL__EN__SHIFT 0x0
+#define UVD_MPEG2_CTRL__TRICK_MODE__SHIFT 0x1
+#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB__SHIFT 0x10
+#define UVD_MPEG2_CTRL__EN_MASK 0x00000001L
+#define UVD_MPEG2_CTRL__TRICK_MODE_MASK 0x00000002L
+#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB_MASK 0xFFFF0000L
+//UVD_MB_CTL_BUF_BASE
+#define UVD_MB_CTL_BUF_BASE__BASE__SHIFT 0x0
+#define UVD_MB_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//UVD_PIC_CTL_BUF_BASE
+#define UVD_PIC_CTL_BUF_BASE__BASE__SHIFT 0x0
+#define UVD_PIC_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//UVD_DXVA_BUF_SIZE
+#define UVD_DXVA_BUF_SIZE__PIC_SIZE__SHIFT 0x0
+#define UVD_DXVA_BUF_SIZE__MB_SIZE__SHIFT 0x10
+#define UVD_DXVA_BUF_SIZE__PIC_SIZE_MASK 0x0000FFFFL
+#define UVD_DXVA_BUF_SIZE__MB_SIZE_MASK 0xFFFF0000L
+//UVD_SCRATCH_NP
+#define UVD_SCRATCH_NP__DATA__SHIFT 0x0
+#define UVD_SCRATCH_NP__DATA_MASK 0xFFFFFFFFL
+//UVD_CLK_SWT_HANDSHAKE
+#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE__SHIFT 0x0
+#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT__SHIFT 0x8
+#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE_MASK 0x00000003L
+#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT_MASK 0x00000300L
+//UVD_GP_SCRATCH0
+#define UVD_GP_SCRATCH0__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH1
+#define UVD_GP_SCRATCH1__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH2
+#define UVD_GP_SCRATCH2__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH3
+#define UVD_GP_SCRATCH3__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH4
+#define UVD_GP_SCRATCH4__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH5
+#define UVD_GP_SCRATCH5__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH6
+#define UVD_GP_SCRATCH6__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH7
+#define UVD_GP_SCRATCH7__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH8
+#define UVD_GP_SCRATCH8__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH9
+#define UVD_GP_SCRATCH9__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH9__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH10
+#define UVD_GP_SCRATCH10__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH11
+#define UVD_GP_SCRATCH11__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH12
+#define UVD_GP_SCRATCH12__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH13
+#define UVD_GP_SCRATCH13__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH14
+#define UVD_GP_SCRATCH14__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH15
+#define UVD_GP_SCRATCH15__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH16
+#define UVD_GP_SCRATCH16__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH17
+#define UVD_GP_SCRATCH17__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH18
+#define UVD_GP_SCRATCH18__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH19
+#define UVD_GP_SCRATCH19__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH20
+#define UVD_GP_SCRATCH20__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH21
+#define UVD_GP_SCRATCH21__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH22
+#define UVD_GP_SCRATCH22__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH23
+#define UVD_GP_SCRATCH23__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//UVD_AUDIO_RB_BASE_LO
+#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_AUDIO_RB_BASE_HI
+#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_AUDIO_RB_SIZE
+#define UVD_AUDIO_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_AUDIO_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_VCPU_INT_STATUS2
+#define UVD_VCPU_INT_STATUS2__SW_RB6_INT__SHIFT 0x0
+#define UVD_VCPU_INT_STATUS2__RASCNTL_VCPU_VCODEC_INT__SHIFT 0x15
+#define UVD_VCPU_INT_STATUS2__SW_RB6_INT_MASK 0x00000001L
+#define UVD_VCPU_INT_STATUS2__RASCNTL_VCPU_VCODEC_INT_MASK 0x00200000L
+//UVD_VCPU_INT_ACK2
+#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK2__RASCNTL_VCPU_VCODEC_ACK__SHIFT 0x16
+#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK_MASK 0x00000001L
+#define UVD_VCPU_INT_ACK2__RASCNTL_VCPU_VCODEC_ACK_MASK 0x00400000L
+//UVD_VCPU_INT_EN2
+#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN__SHIFT 0x1
+#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN_MASK 0x00000001L
+#define UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN_MASK 0x00000002L
+//UVD_SUVD_CGC_STATUS2
+#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS2__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS2__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS2__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_STATUS2__FBC_CCLK_MASK 0x20000000L
+//UVD_SUVD_INT_STATUS2
+#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT__SHIFT 0x0
+#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT__SHIFT 0xb
+#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT_MASK 0x0000001FL
+#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT_MASK 0x00000800L
+//UVD_SUVD_INT_EN2
+#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN__SHIFT 0x0
+#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN__SHIFT 0xb
+#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN_MASK 0x0000001FL
+#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK 0x00000020L
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN_MASK 0x000007C0L
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN_MASK 0x00000800L
+//UVD_SUVD_INT_ACK2
+#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT 0x0
+#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT 0x5
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK__SHIFT 0x6
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK__SHIFT 0xb
+#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK 0x0000001FL
+#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK 0x00000020L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK_MASK 0x000007C0L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK_MASK 0x00000800L
+//UVD_STATUS
+#define UVD_STATUS__RBC_BUSY__SHIFT 0x0
+#define UVD_STATUS__VCPU_REPORT__SHIFT 0x1
+#define UVD_STATUS__FILL_0__SHIFT 0x8
+#define UVD_STATUS__RBC_ACCESS_GPCOM__SHIFT 0x10
+#define UVD_STATUS__DRM_BUSY__SHIFT 0x11
+#define UVD_STATUS__FILL_1__SHIFT 0x12
+#define UVD_STATUS__SYS_GPCOM_REQ__SHIFT 0x1f
+#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L
+#define UVD_STATUS__VCPU_REPORT_MASK 0x000000FEL
+#define UVD_STATUS__FILL_0_MASK 0x0000FF00L
+#define UVD_STATUS__RBC_ACCESS_GPCOM_MASK 0x00010000L
+#define UVD_STATUS__DRM_BUSY_MASK 0x00020000L
+#define UVD_STATUS__FILL_1_MASK 0x7FFC0000L
+#define UVD_STATUS__SYS_GPCOM_REQ_MASK 0x80000000L
+//UVD_ENC_PIPE_BUSY
+#define UVD_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb
+#define UVD_ENC_PIPE_BUSY__EFC_BUSY__SHIFT 0xc
+#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY__SHIFT 0xd
+#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY__SHIFT 0xe
+#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY__SHIFT 0xf
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x1d
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x1e
+#define UVD_ENC_PIPE_BUSY__SAOE_BUSY__SHIFT 0x1f
+#define UVD_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 0x00000020L
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L
+#define UVD_ENC_PIPE_BUSY__EFC_BUSY_MASK 0x00001000L
+#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY_MASK 0x00002000L
+#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY_MASK 0x00004000L
+#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY_MASK 0x00008000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x20000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x40000000L
+#define UVD_ENC_PIPE_BUSY__SAOE_BUSY_MASK 0x80000000L
+//UVD_FW_POWER_STATUS
+#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF__SHIFT 0x0
+#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF__SHIFT 0x1
+#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF__SHIFT 0x2
+#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF__SHIFT 0x3
+#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF__SHIFT 0x4
+#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF__SHIFT 0x5
+#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF__SHIFT 0x6
+#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF__SHIFT 0x7
+#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF__SHIFT 0x8
+#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF__SHIFT 0x9
+#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF__SHIFT 0xa
+#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF_MASK 0x00000001L
+#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF_MASK 0x00000002L
+#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF_MASK 0x00000004L
+#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF_MASK 0x00000008L
+#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF_MASK 0x00000010L
+#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF_MASK 0x00000020L
+#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF_MASK 0x00000040L
+#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF_MASK 0x00000080L
+#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF_MASK 0x00000100L
+#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF_MASK 0x00000200L
+#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF_MASK 0x00000400L
+//UVD_CNTL
+#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP__SHIFT 0x11
+#define UVD_CNTL__SUVD_EN__SHIFT 0x13
+#define UVD_CNTL__CABAC_MB_ACC__SHIFT 0x1c
+#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS__SHIFT 0x1f
+#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK 0x00020000L
+#define UVD_CNTL__SUVD_EN_MASK 0x00080000L
+#define UVD_CNTL__CABAC_MB_ACC_MASK 0x10000000L
+#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS_MASK 0x80000000L
+//UVD_SOFT_RESET
+#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x2
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x3
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x4
+#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x6
+#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x7
+#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x8
+#define UVD_SOFT_RESET__EFC_SOFT_RESET__SHIFT 0x9
+#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0xa
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0xb
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0xc
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0xd
+#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0xe
+#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0xf
+#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x10
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET__SHIFT 0x11
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS__SHIFT 0x12
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS__SHIFT 0x13
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS__SHIFT 0x14
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS__SHIFT 0x15
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS__SHIFT 0x16
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS__SHIFT 0x17
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS__SHIFT 0x18
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS__SHIFT 0x19
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS__SHIFT 0x1a
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS__SHIFT 0x1b
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS__SHIFT 0x1c
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS__SHIFT 0x1d
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS__SHIFT 0x1e
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS__SHIFT 0x1f
+#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L
+#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L
+#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L
+#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L
+#define UVD_SOFT_RESET__EFC_SOFT_RESET_MASK 0x00000200L
+#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L
+#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L
+#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L
+#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET_MASK 0x00020000L
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS_MASK 0x00040000L
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS_MASK 0x00080000L
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS_MASK 0x00100000L
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS_MASK 0x00200000L
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS_MASK 0x00400000L
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS_MASK 0x00800000L
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS_MASK 0x01000000L
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS_MASK 0x02000000L
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS_MASK 0x04000000L
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS_MASK 0x08000000L
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS_MASK 0x10000000L
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS_MASK 0x20000000L
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS_MASK 0x40000000L
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS_MASK 0x80000000L
+//UVD_SOFT_RESET2
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET2__PPU_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS__SHIFT 0x10
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET2__PPU_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS_MASK 0x00010000L
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_MMSCH_SOFT_RESET
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET__SHIFT 0x0
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x1
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK__SHIFT 0x1f
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET_MASK 0x00000001L
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK_MASK 0x80000000L
+//UVD_WIG_CTRL
+#define UVD_WIG_CTRL__AVM_SOFT_RESET__SHIFT 0x0
+#define UVD_WIG_CTRL__ACAP_SOFT_RESET__SHIFT 0x1
+#define UVD_WIG_CTRL__WIG_SOFT_RESET__SHIFT 0x2
+#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON__SHIFT 0x3
+#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON__SHIFT 0x4
+#define UVD_WIG_CTRL__AVM_SOFT_RESET_MASK 0x00000001L
+#define UVD_WIG_CTRL__ACAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_WIG_CTRL__WIG_SOFT_RESET_MASK 0x00000004L
+#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON_MASK 0x00000008L
+#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON_MASK 0x00000010L
+//UVD_CGC_STATUS
+#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x0
+#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x1
+#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x2
+#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x3
+#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x4
+#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x5
+#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x6
+#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x7
+#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x8
+#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x9
+#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0xa
+#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0xb
+#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0xc
+#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0xd
+#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0xe
+#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0xf
+#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x10
+#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x11
+#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x12
+#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x13
+#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x14
+#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x15
+#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x16
+#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x17
+#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x18
+#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x19
+#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x1a
+#define UVD_CGC_STATUS__MMSCH_SCLK__SHIFT 0x1b
+#define UVD_CGC_STATUS__MMSCH_VCLK__SHIFT 0x1c
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE__SHIFT 0x1d
+#define UVD_CGC_STATUS__LRBBM_DCLK__SHIFT 0x1e
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE__SHIFT 0x1f
+#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L
+#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L
+#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L
+#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L
+#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L
+#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L
+#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L
+#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L
+#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L
+#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L
+#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L
+#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L
+#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L
+#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L
+#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L
+#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L
+#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L
+#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L
+#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L
+#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L
+#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L
+#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L
+#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L
+#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L
+#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L
+#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L
+#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L
+#define UVD_CGC_STATUS__MMSCH_SCLK_MASK 0x08000000L
+#define UVD_CGC_STATUS__MMSCH_VCLK_MASK 0x10000000L
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE_MASK 0x20000000L
+#define UVD_CGC_STATUS__LRBBM_DCLK_MASK 0x40000000L
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE_MASK 0x80000000L
+//UVD_CGC_UDEC_STATUS
+#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x0
+#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x1
+#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x2
+#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x3
+#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x4
+#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x5
+#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x6
+#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x7
+#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x8
+#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x9
+#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0xa
+#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0xb
+#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0xc
+#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0xd
+#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0xe
+#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L
+#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L
+#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L
+#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L
+#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L
+#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L
+#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L
+#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L
+#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L
+#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L
+#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L
+#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L
+#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L
+#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L
+#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L
+//UVD_SUVD_CGC_STATUS
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK__SHIFT 0x2
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK__SHIFT 0xe
+#define UVD_SUVD_CGC_STATUS__UVD_SC__SHIFT 0xf
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK__SHIFT 0x10
+#define UVD_SUVD_CGC_STATUS__IME_DCLK__SHIFT 0x11
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK__SHIFT 0x12
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK__SHIFT 0x13
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK__SHIFT 0x14
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK__SHIFT 0x15
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK__SHIFT 0x16
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK__SHIFT 0x17
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK__SHIFT 0x18
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK__SHIFT 0x19
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK__SHIFT 0x1a
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS__SAOE_DCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK__SHIFT 0x1e
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK__SHIFT 0x1f
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK_MASK 0x00004000L
+#define UVD_SUVD_CGC_STATUS__UVD_SC_MASK 0x00008000L
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK_MASK 0x00010000L
+#define UVD_SUVD_CGC_STATUS__IME_DCLK_MASK 0x00020000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK_MASK 0x00040000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK_MASK 0x00080000L
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK_MASK 0x00100000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK_MASK 0x00200000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK_MASK 0x00400000L
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK_MASK 0x00800000L
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK_MASK 0x01000000L
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK_MASK 0x02000000L
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK_MASK 0x04000000L
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_STATUS__SAOE_DCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK_MASK 0x40000000L
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK_MASK 0x80000000L
+//UVD_GPCOM_VCPU_CMD
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L
+
+
+// addressBlock: aid_uvd0_ecpudec
+//UVD_VCPU_CACHE_OFFSET0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET1
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE1
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET2
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE2
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET3
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE3
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET4
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE4
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET5
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE5
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET6
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE6
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET7
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE7
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET8
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE8
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET1
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE1
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CNTL
+#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x0
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x4
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x5
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x6
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x7
+#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x8
+#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9
+#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0xa
+#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0xb
+#define UVD_VCPU_CNTL__DBG_MUX__SHIFT 0xd
+#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x10
+#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x12
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
+#define UVD_VCPU_CNTL__BLK_RST__SHIFT 0x1c
+#define UVD_VCPU_CNTL__RUNSTALL__SHIFT 0x1d
+#define UVD_VCPU_CNTL__SRE_CMDIF_DRST__SHIFT 0x1e
+#define UVD_VCPU_CNTL__SRE_CMDIF_VRST__SHIFT 0x1f
+#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000FL
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000010L
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L
+#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L
+#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
+#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L
+#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L
+#define UVD_VCPU_CNTL__DBG_MUX_MASK 0x0000E000L
+#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L
+#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
+#define UVD_VCPU_CNTL__BLK_RST_MASK 0x10000000L
+#define UVD_VCPU_CNTL__RUNSTALL_MASK 0x20000000L
+#define UVD_VCPU_CNTL__SRE_CMDIF_DRST_MASK 0x40000000L
+#define UVD_VCPU_CNTL__SRE_CMDIF_VRST_MASK 0x80000000L
+//UVD_VCPU_PRID
+#define UVD_VCPU_PRID__PRID__SHIFT 0x0
+#define UVD_VCPU_PRID__PRID_MASK 0x0000FFFFL
+//UVD_VCPU_TRCE
+#define UVD_VCPU_TRCE__PC__SHIFT 0x0
+#define UVD_VCPU_TRCE__PC_MASK 0x0FFFFFFFL
+//UVD_VCPU_TRCE_RD
+#define UVD_VCPU_TRCE_RD__DATA__SHIFT 0x0
+#define UVD_VCPU_TRCE_RD__DATA_MASK 0xFFFFFFFFL
+//UVD_VCPU_IND_INDEX
+#define UVD_VCPU_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_VCPU_IND_INDEX__INDEX_MASK 0x000001FFL
+//UVD_VCPU_IND_DATA
+#define UVD_VCPU_IND_DATA__DATA__SHIFT 0x0
+#define UVD_VCPU_IND_DATA__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_mpcdec
+//UVD_MP_SWAP_CNTL
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP__SHIFT 0x0
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP__SHIFT 0x2
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP__SHIFT 0x4
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP__SHIFT 0x6
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP__SHIFT 0x8
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP__SHIFT 0xa
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP__SHIFT 0xc
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP__SHIFT 0xe
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP__SHIFT 0x10
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP__SHIFT 0x12
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP__SHIFT 0x14
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP__SHIFT 0x16
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP__SHIFT 0x18
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP__SHIFT 0x1a
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP__SHIFT 0x1c
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP__SHIFT 0x1e
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP_MASK 0x00000003L
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP_MASK 0x0000000CL
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP_MASK 0x00000030L
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP_MASK 0x000000C0L
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP_MASK 0x00000300L
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP_MASK 0x00000C00L
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP_MASK 0x00003000L
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP_MASK 0x0000C000L
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP_MASK 0x00030000L
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP_MASK 0x000C0000L
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP_MASK 0x00300000L
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP_MASK 0x00C00000L
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP_MASK 0x03000000L
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP_MASK 0x0C000000L
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP_MASK 0x30000000L
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP_MASK 0xC0000000L
+//UVD_MP_SWAP_CNTL2
+#define UVD_MP_SWAP_CNTL2__MP_REF16_MC_SWAP__SHIFT 0x0
+#define UVD_MP_SWAP_CNTL2__MP_REF16_MC_SWAP_MASK 0x00000003L
+//UVD_MPC_LUMA_SRCH
+#define UVD_MPC_LUMA_SRCH__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_SRCH__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_LUMA_HIT
+#define UVD_MPC_LUMA_HIT__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_HIT__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_LUMA_HITPEND
+#define UVD_MPC_LUMA_HITPEND__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_HITPEND__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_SRCH
+#define UVD_MPC_CHROMA_SRCH__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_SRCH__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_HIT
+#define UVD_MPC_CHROMA_HIT__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_HIT__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_HITPEND
+#define UVD_MPC_CHROMA_HITPEND__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_HITPEND__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CNTL
+#define UVD_MPC_CNTL__BLK_RST__SHIFT 0x0
+#define UVD_MPC_CNTL__REG_MPC1_PERF_SELECT__SHIFT 0x1
+#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x3
+#define UVD_MPC_CNTL__PERF_RST__SHIFT 0x6
+#define UVD_MPC_CNTL__REG_MPC_CNTL_BACKWARD_COMPATIBILITY__SHIFT 0x7
+#define UVD_MPC_CNTL__DBG_MUX__SHIFT 0x8
+#define UVD_MPC_CNTL__AVE_WEIGHT__SHIFT 0x10
+#define UVD_MPC_CNTL__URGENT_EN__SHIFT 0x12
+#define UVD_MPC_CNTL__SMPAT_REQ_SPEED_UP__SHIFT 0x13
+#define UVD_MPC_CNTL__TEST_MODE_EN__SHIFT 0x14
+#define UVD_MPC_CNTL__BLK_RST_MASK 0x00000001L
+#define
UVD_MPC_CNTL__REG_MPC1_PERF_SELECT_MASK 0x00000002L +#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L +#define UVD_MPC_CNTL__PERF_RST_MASK 0x00000040L +#define UVD_MPC_CNTL__REG_MPC_CNTL_BACKWARD_COMPATIBILITY_MASK 0x00000080L +#define UVD_MPC_CNTL__DBG_MUX_MASK 0x00000F00L +#define UVD_MPC_CNTL__AVE_WEIGHT_MASK 0x00030000L +#define UVD_MPC_CNTL__URGENT_EN_MASK 0x00040000L +#define UVD_MPC_CNTL__SMPAT_REQ_SPEED_UP_MASK 0x00080000L +#define UVD_MPC_CNTL__TEST_MODE_EN_MASK 0x00300000L +//UVD_MPC_PITCH +#define UVD_MPC_PITCH__LUMA_PITCH__SHIFT 0x0 +#define UVD_MPC_PITCH__LUMA_PITCH_MASK 0x000007FFL +//UVD_MPC_SET_MUXA0 +#define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x0 +#define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x6 +#define UVD_MPC_SET_MUXA0__VARA_2__SHIFT 0xc +#define UVD_MPC_SET_MUXA0__VARA_3__SHIFT 0x12 +#define UVD_MPC_SET_MUXA0__VARA_4__SHIFT 0x18 +#define UVD_MPC_SET_MUXA0__VARA_0_MASK 0x0000003FL +#define UVD_MPC_SET_MUXA0__VARA_1_MASK 0x00000FC0L +#define UVD_MPC_SET_MUXA0__VARA_2_MASK 0x0003F000L +#define UVD_MPC_SET_MUXA0__VARA_3_MASK 0x00FC0000L +#define UVD_MPC_SET_MUXA0__VARA_4_MASK 0x3F000000L +//UVD_MPC_SET_MUXA1 +#define UVD_MPC_SET_MUXA1__VARA_5__SHIFT 0x0 +#define UVD_MPC_SET_MUXA1__VARA_6__SHIFT 0x6 +#define UVD_MPC_SET_MUXA1__VARA_7__SHIFT 0xc +#define UVD_MPC_SET_MUXA1__VARA_5_MASK 0x0000003FL +#define UVD_MPC_SET_MUXA1__VARA_6_MASK 0x00000FC0L +#define UVD_MPC_SET_MUXA1__VARA_7_MASK 0x0003F000L +//UVD_MPC_SET_MUXB0 +#define UVD_MPC_SET_MUXB0__VARB_0__SHIFT 0x0 +#define UVD_MPC_SET_MUXB0__VARB_1__SHIFT 0x6 +#define UVD_MPC_SET_MUXB0__VARB_2__SHIFT 0xc +#define UVD_MPC_SET_MUXB0__VARB_3__SHIFT 0x12 +#define UVD_MPC_SET_MUXB0__VARB_4__SHIFT 0x18 +#define UVD_MPC_SET_MUXB0__VARB_0_MASK 0x0000003FL +#define UVD_MPC_SET_MUXB0__VARB_1_MASK 0x00000FC0L +#define UVD_MPC_SET_MUXB0__VARB_2_MASK 0x0003F000L +#define UVD_MPC_SET_MUXB0__VARB_3_MASK 0x00FC0000L +#define UVD_MPC_SET_MUXB0__VARB_4_MASK 0x3F000000L +//UVD_MPC_SET_MUXB1 +#define UVD_MPC_SET_MUXB1__VARB_5__SHIFT 0x0 +#define UVD_MPC_SET_MUXB1__VARB_6__SHIFT 0x6 +#define UVD_MPC_SET_MUXB1__VARB_7__SHIFT 0xc +#define UVD_MPC_SET_MUXB1__VARB_5_MASK 0x0000003FL +#define UVD_MPC_SET_MUXB1__VARB_6_MASK 0x00000FC0L +#define UVD_MPC_SET_MUXB1__VARB_7_MASK 0x0003F000L +//UVD_MPC_SET_MUX +#define UVD_MPC_SET_MUX__SET_0__SHIFT 0x0 +#define UVD_MPC_SET_MUX__SET_1__SHIFT 0x3 +#define UVD_MPC_SET_MUX__SET_2__SHIFT 0x6 +#define UVD_MPC_SET_MUX__SET_0_MASK 0x00000007L +#define UVD_MPC_SET_MUX__SET_1_MASK 0x00000038L +#define UVD_MPC_SET_MUX__SET_2_MASK 0x000001C0L +//UVD_MPC_SET_ALU +#define UVD_MPC_SET_ALU__FUNCT__SHIFT 0x0 +#define UVD_MPC_SET_ALU__OPERAND__SHIFT 0x4 +#define UVD_MPC_SET_ALU__FUNCT_MASK 0x00000007L +#define UVD_MPC_SET_ALU__OPERAND_MASK 0x00000FF0L +//UVD_MPC_PERF0 +#define UVD_MPC_PERF0__MAX_LAT__SHIFT 0x0 +#define UVD_MPC_PERF0__MAX_LAT_MASK 0x000003FFL +//UVD_MPC_PERF1 +#define UVD_MPC_PERF1__AVE_LAT__SHIFT 0x0 +#define UVD_MPC_PERF1__AVE_LAT_MASK 0x000003FFL +//UVD_MPC_IND_INDEX +#define UVD_MPC_IND_INDEX__INDEX__SHIFT 0x0 +#define UVD_MPC_IND_INDEX__INDEX_MASK 0x000001FFL +//UVD_MPC_IND_DATA +#define UVD_MPC_IND_DATA__DATA__SHIFT 0x0 +#define UVD_MPC_IND_DATA__DATA_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_rbcdec +//UVD_RBC_IB_SIZE +#define UVD_RBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_RBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_RBC_IB_SIZE_UPDATE +#define UVD_RBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_RBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_RBC_RB_CNTL +#define 
UVD_RBC_RB_CNTL__RB_BUFSZ__SHIFT 0x0 +#define UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT 0x8 +#define UVD_RBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x10 +#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN__SHIFT 0x14 +#define UVD_RBC_RB_CNTL__RB_NO_UPDATE__SHIFT 0x18 +#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1c +#define UVD_RBC_RB_CNTL__BLK_RST__SHIFT 0x1d +#define UVD_RBC_RB_CNTL__RB_BUFSZ_MASK 0x0000001FL +#define UVD_RBC_RB_CNTL__RB_BLKSZ_MASK 0x00001F00L +#define UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK 0x00010000L +#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN_MASK 0x00100000L +#define UVD_RBC_RB_CNTL__RB_NO_UPDATE_MASK 0x01000000L +#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x10000000L +#define UVD_RBC_RB_CNTL__BLK_RST_MASK 0x20000000L +//UVD_RBC_RB_RPTR_ADDR +#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x0 +#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFFL +//UVD_RBC_VCPU_ACCESS +#define UVD_RBC_VCPU_ACCESS__ENABLE_RBC__SHIFT 0x0 +#define UVD_RBC_VCPU_ACCESS__ENABLE_RBC_MASK 0x00000001L +//UVD_FW_SEMAPHORE_CNTL +#define UVD_FW_SEMAPHORE_CNTL__START__SHIFT 0x0 +#define UVD_FW_SEMAPHORE_CNTL__BUSY__SHIFT 0x8 +#define UVD_FW_SEMAPHORE_CNTL__PASS__SHIFT 0x9 +#define UVD_FW_SEMAPHORE_CNTL__START_MASK 0x00000001L +#define UVD_FW_SEMAPHORE_CNTL__BUSY_MASK 0x00000100L +#define UVD_FW_SEMAPHORE_CNTL__PASS_MASK 0x00000200L +//UVD_RBC_READ_REQ_URGENT_CNTL +#define UVD_RBC_READ_REQ_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_RBC_READ_REQ_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_RBC_RB_WPTR_CNTL +#define UVD_RBC_RB_WPTR_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x0 +#define UVD_RBC_RB_WPTR_CNTL__RB_PRE_WRITE_TIMER_MASK 0x00007FFFL +//UVD_RBC_WPTR_STATUS +#define UVD_RBC_WPTR_STATUS__RB_WPTR_IN_USE__SHIFT 0x4 +#define UVD_RBC_WPTR_STATUS__RB_WPTR_IN_USE_MASK 0x007FFFF0L +//UVD_RBC_WPTR_POLL_CNTL +#define UVD_RBC_WPTR_POLL_CNTL__POLL_FREQ__SHIFT 0x0 +#define UVD_RBC_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define UVD_RBC_WPTR_POLL_CNTL__POLL_FREQ_MASK 0x0000FFFFL +#define UVD_RBC_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//UVD_RBC_WPTR_POLL_ADDR +#define UVD_RBC_WPTR_POLL_ADDR__POLL_ADDR__SHIFT 0x2 +#define UVD_RBC_WPTR_POLL_ADDR__POLL_ADDR_MASK 0xFFFFFFFCL +//UVD_SEMA_CMD +#define UVD_SEMA_CMD__REQ_CMD__SHIFT 0x0 +#define UVD_SEMA_CMD__WR_PHASE__SHIFT 0x4 +#define UVD_SEMA_CMD__MODE__SHIFT 0x6 +#define UVD_SEMA_CMD__VMID_EN__SHIFT 0x7 +#define UVD_SEMA_CMD__VMID__SHIFT 0x8 +#define UVD_SEMA_CMD__REQ_CMD_MASK 0x0000000FL +#define UVD_SEMA_CMD__WR_PHASE_MASK 0x00000030L +#define UVD_SEMA_CMD__MODE_MASK 0x00000040L +#define UVD_SEMA_CMD__VMID_EN_MASK 0x00000080L +#define UVD_SEMA_CMD__VMID_MASK 0x00000F00L +//UVD_SEMA_ADDR_LOW +#define UVD_SEMA_ADDR_LOW__ADDR_26_3__SHIFT 0x0 +#define UVD_SEMA_ADDR_LOW__ADDR_26_3_MASK 0x00FFFFFFL +//UVD_SEMA_ADDR_HIGH +#define UVD_SEMA_ADDR_HIGH__ADDR_47_27__SHIFT 0x0 +#define UVD_SEMA_ADDR_HIGH__ADDR_47_27_MASK 0x001FFFFFL +//UVD_ENGINE_CNTL +#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x0 +#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x1 +#define UVD_ENGINE_CNTL__NJ_PF_HANDLE_DISABLE__SHIFT 0x2 +#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x00000001L +#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x00000002L +#define UVD_ENGINE_CNTL__NJ_PF_HANDLE_DISABLE_MASK 0x00000004L +//UVD_SEMA_TIMEOUT_STATUS +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x0 +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT__SHIFT 0x1 +#define 
UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x2 +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR__SHIFT 0x3 +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000001L +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT_MASK 0x00000002L +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000004L +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR_MASK 0x00000008L +//UVD_SEMA_CNTL +#define UVD_SEMA_CNTL__SEMAPHORE_EN__SHIFT 0x0 +#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS__SHIFT 0x1 +#define UVD_SEMA_CNTL__SEMAPHORE_EN_MASK 0x00000001L +#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS_MASK 0x00000002L +//UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN__SHIFT 0x0 +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT__SHIFT 0x1 +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18 +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN_MASK 0x00000001L +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT_MASK 0x001FFFFEL +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L +//UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN__SHIFT 0x0 +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT__SHIFT 0x1 +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18 +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN_MASK 0x00000001L +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT_MASK 0x001FFFFEL +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L +//UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN__SHIFT 0x0 +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT__SHIFT 0x1 +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18 +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN_MASK 0x00000001L +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT_MASK 0x001FFFFEL +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L +//UVD_JOB_START +#define UVD_JOB_START__JOB_START__SHIFT 0x0 +#define UVD_JOB_START__JOB_START_MASK 0x00000001L +//UVD_RBC_BUF_STATUS +#define UVD_RBC_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_RBC_BUF_STATUS__IB_BUF_VALID__SHIFT 0x8 +#define UVD_RBC_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_RBC_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x13 +#define UVD_RBC_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x16 +#define UVD_RBC_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x19 +#define UVD_RBC_BUF_STATUS__RB_BUF_VALID_MASK 0x000000FFL +#define UVD_RBC_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FF00L +#define UVD_RBC_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x00070000L +#define UVD_RBC_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x00380000L +#define UVD_RBC_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x01C00000L +#define UVD_RBC_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x0E000000L +//UVD_RBC_SWAP_CNTL +#define UVD_RBC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_RBC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_RBC_SWAP_CNTL__RB_RPTR_MC_SWAP__SHIFT 0x4 +#define UVD_RBC_SWAP_CNTL__RB_WR_MC_SWAP__SHIFT 0x1a +#define UVD_RBC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_RBC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_RBC_SWAP_CNTL__RB_RPTR_MC_SWAP_MASK 0x00000030L +#define UVD_RBC_SWAP_CNTL__RB_WR_MC_SWAP_MASK 0x0C000000L + + +// 
addressBlock: aid_uvd0_lmi_adpdec +//UVD_LMI_RE_64BIT_BAR_LOW +#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_RE_64BIT_BAR_HIGH +#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_IT_64BIT_BAR_LOW +#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_IT_64BIT_BAR_HIGH +#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MP_64BIT_BAR_LOW +#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MP_64BIT_BAR_HIGH +#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_CM_64BIT_BAR_LOW +#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_CM_64BIT_BAR_HIGH +#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_DB_64BIT_BAR_LOW +#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_DB_64BIT_BAR_HIGH +#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_DBW_64BIT_BAR_LOW +#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_DBW_64BIT_BAR_HIGH +#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_IDCT_64BIT_BAR_LOW +#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_IDCT_64BIT_BAR_HIGH +#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_S0_64BIT_BAR_LOW +#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_S0_64BIT_BAR_HIGH +#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_S1_64BIT_BAR_LOW +#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_S1_64BIT_BAR_HIGH +#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_DBW_64BIT_BAR_LOW +#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH +#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MPC_64BIT_BAR_LOW +#define UVD_LMI_MPC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MPC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MPC_64BIT_BAR_HIGH +#define UVD_LMI_MPC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MPC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_RB_64BIT_BAR_LOW +#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define 
UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_RB_64BIT_BAR_HIGH +#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_IB_64BIT_BAR_LOW +#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_IB_64BIT_BAR_HIGH +#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_LBSI_64BIT_BAR_LOW +#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_LBSI_64BIT_BAR_HIGH +#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC0_64BIT_BAR_LOW +#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC1_64BIT_BAR_LOW +#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_CENC_64BIT_BAR_LOW +#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_CENC_64BIT_BAR_HIGH +#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_SRE_64BIT_BAR_LOW +#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_SRE_64BIT_BAR_HIGH +#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW +#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH +#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW +#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH +#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW +#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH +#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL 
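+/* Usage sketch for the 64BIT_BAR_LOW/HIGH pairs above: each pair carries one
+ * 64-bit GPU address split across two 32-bit registers. Hedged example only:
+ * the regUVD_* offset names are assumed to come from the companion _offset.h
+ * header, 'inst' and 'addr' are placeholders, and WREG32_SOC15(),
+ * lower_32_bits() and upper_32_bits() are the usual kernel helpers:
+ *
+ *   WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ *                lower_32_bits(addr));
+ *   WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ *                upper_32_bits(addr));
+ */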
+//UVD_LMI_MIF_REF_64BIT_BAR_LOW +#define UVD_LMI_MIF_REF_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_REF_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_REF_64BIT_BAR_HIGH +#define UVD_LMI_MIF_REF_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_REF_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_DBW_64BIT_BAR_LOW +#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_DBW_64BIT_BAR_HIGH +#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW +#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH +#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSP0_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH +#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSP1_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH +#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSP2_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH +#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSP3_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH +#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD0_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH +#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD1_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH +#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD2_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH +#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD3_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH +#define 
UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD4_64BIT_BAR_LOW +#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH +#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_SCLR_64BIT_BAR_LOW +#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH +#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define 
UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW +#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH +#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_SPH_64BIT_BAR_HIGH +#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW +#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH +#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW +#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH +#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW +#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH +#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW +#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH +#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_ADP_ATOMIC_CONFIG +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE__SHIFT 0x0 +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE__SHIFT 0x4 +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE__SHIFT 0x8 +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE__SHIFT 0xc +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG__SHIFT 0x10 +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE_MASK 0x0000000FL +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE_MASK 0x000000F0L +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE_MASK 0x00000F00L +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE_MASK 0x0000F000L +#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG_MASK 0x000F0000L +//UVD_LMI_ARB_CTRL2 +#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN__SHIFT 0x0 +#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN__SHIFT 0x1 +#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST__SHIFT 0x2 +#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST__SHIFT 0x6 +#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX__SHIFT 0xa +#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX__SHIFT 0x14 +#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN_MASK 0x00000001L +#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN_MASK 0x00000002L +#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST_MASK 0x0000003CL +#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST_MASK 0x000003C0L +#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX_MASK 0x000FFC00L +#define 
UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX_MASK 0xFFF00000L +//UVD_LMI_VCPU_CACHE_VMIDS_MULTI +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID__SHIFT 0x4 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID__SHIFT 0x8 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID__SHIFT 0xc +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID__SHIFT 0x10 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID__SHIFT 0x14 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID__SHIFT 0x18 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID__SHIFT 0x1c +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID_MASK 0x0000000FL +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID_MASK 0x000000F0L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID_MASK 0x00000F00L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID_MASK 0x0000F000L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID_MASK 0x000F0000L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID_MASK 0x00F00000L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID_MASK 0x0F000000L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID_MASK 0xF0000000L +//UVD_LMI_VCPU_NC_VMIDS_MULTI +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID__SHIFT 0x4 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID__SHIFT 0x8 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID__SHIFT 0xc +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID__SHIFT 0x10 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID__SHIFT 0x14 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID__SHIFT 0x18 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID_MASK 0x000000F0L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID_MASK 0x00000F00L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID_MASK 0x0000F000L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID_MASK 0x000F0000L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID_MASK 0x00F00000L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID_MASK 0x0F000000L +//UVD_LMI_LAT_CTRL +#define UVD_LMI_LAT_CTRL__SCALE__SHIFT 0x0 +#define UVD_LMI_LAT_CTRL__MAX_START__SHIFT 0x8 +#define UVD_LMI_LAT_CTRL__MIN_START__SHIFT 0x9 +#define UVD_LMI_LAT_CTRL__AVG_START__SHIFT 0xa +#define UVD_LMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb +#define UVD_LMI_LAT_CTRL__SKIP__SHIFT 0x10 +#define UVD_LMI_LAT_CTRL__SCALE_MASK 0x000000FFL +#define UVD_LMI_LAT_CTRL__MAX_START_MASK 0x00000100L +#define UVD_LMI_LAT_CTRL__MIN_START_MASK 0x00000200L +#define UVD_LMI_LAT_CTRL__AVG_START_MASK 0x00000400L +#define UVD_LMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L +#define UVD_LMI_LAT_CTRL__SKIP_MASK 0x000F0000L +//UVD_LMI_LAT_CNTR +#define UVD_LMI_LAT_CNTR__MAX_LAT__SHIFT 0x0 +#define UVD_LMI_LAT_CNTR__MIN_LAT__SHIFT 0x8 +#define UVD_LMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL +#define UVD_LMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L +//UVD_LMI_AVG_LAT_CNTR +#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0 +#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8 +#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10 +#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL +#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L +#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L +//UVD_LMI_SPH +#define UVD_LMI_SPH__ADDR__SHIFT 0x0 +#define UVD_LMI_SPH__STS__SHIFT 0x1c +#define UVD_LMI_SPH__STS_VALID__SHIFT 0x1e +#define UVD_LMI_SPH__STS_OVERFLOW__SHIFT 0x1f +#define UVD_LMI_SPH__ADDR_MASK 0x0FFFFFFFL +#define UVD_LMI_SPH__STS_MASK 0x30000000L +#define 
UVD_LMI_SPH__STS_VALID_MASK 0x40000000L +#define UVD_LMI_SPH__STS_OVERFLOW_MASK 0x80000000L +//UVD_LMI_VCPU_CACHE_VMID +#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL +//UVD_LMI_CTRL2 +#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x0 +#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x1 +#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x2 +#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x3 +#define UVD_LMI_CTRL2__CRC1_RESET__SHIFT 0x4 +#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x7 +#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8 +#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9 +#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb +#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0xd +#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0xe +#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0xf +#define UVD_LMI_CTRL2__RE_OFFLOAD_EN__SHIFT 0x10 +#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11 +#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP__SHIFT 0x19 +#define UVD_LMI_CTRL2__NJ_MIF_GATING__SHIFT 0x1a +#define UVD_LMI_CTRL2__CRC1_SEL__SHIFT 0x1b +#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L +#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L +#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L +#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L +#define UVD_LMI_CTRL2__CRC1_RESET_MASK 0x00000010L +#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L +#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L +#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L +#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L +#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L +#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L +#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L +#define UVD_LMI_CTRL2__RE_OFFLOAD_EN_MASK 0x00010000L +#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L +#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP_MASK 0x02000000L +#define UVD_LMI_CTRL2__NJ_MIF_GATING_MASK 0x04000000L +#define UVD_LMI_CTRL2__CRC1_SEL_MASK 0xF8000000L +//UVD_LMI_URGENT_CTRL +#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0 +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL__SHIFT 0x1 +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x2 +#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x8 +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL__SHIFT 0x9 +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0xa +#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL__SHIFT 0x10 +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL__SHIFT 0x11 +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT__SHIFT 0x12 +#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL__SHIFT 0x18 +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL__SHIFT 0x19 +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT__SHIFT 0x1a +#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL_MASK 0x00000002L +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x0000003CL +#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00000100L +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL_MASK 0x00000200L +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00003C00L +#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL_MASK 0x00010000L +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL_MASK 0x00020000L +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT_MASK 0x003C0000L +#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL_MASK 0x01000000L 
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL_MASK 0x02000000L +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT_MASK 0x3C000000L +//UVD_LMI_CTRL +#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x0 +#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x8 +#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x9 +#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0xb +#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0xc +#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0xd +#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0xe +#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0xf +#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL__SHIFT 0x14 +#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x15 +#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x16 +#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x17 +#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x18 +#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x19 +#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x1a +#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ__SHIFT 0x1b +#define UVD_LMI_CTRL__MC_BLK_RST__SHIFT 0x1c +#define UVD_LMI_CTRL__UMC_BLK_RST__SHIFT 0x1d +#define UVD_LMI_CTRL__RFU__SHIFT 0x1e +#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000FFL +#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L +#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L +#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L +#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L +#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L +#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L +#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000F8000L +#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK 0x00100000L +#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L +#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L +#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L +#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L +#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L +#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L +#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ_MASK 0x08000000L +#define UVD_LMI_CTRL__MC_BLK_RST_MASK 0x10000000L +#define UVD_LMI_CTRL__UMC_BLK_RST_MASK 0x20000000L +#define UVD_LMI_CTRL__RFU_MASK 0xC0000000L +//UVD_LMI_STATUS +#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0 +#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1 +#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2 +#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3 +#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x4 +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x5 +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6 +#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x7 +#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x8 +#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9 +#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0xa +#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0xb +#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0xc +#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0xd +#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN__SHIFT 0x12 +#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN__SHIFT 0x13 +#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN__SHIFT 0x14 +#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN__SHIFT 0x15 +#define UVD_LMI_STATUS__CENC_READ_CLEAN__SHIFT 0x16 +#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L +#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L +#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L +#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L +#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L +#define 
UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L +#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L +#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L +#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L +#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L +#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L +#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L +#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L +#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN_MASK 0x00040000L +#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN_MASK 0x00080000L +#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN_MASK 0x00100000L +#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN_MASK 0x00200000L +#define UVD_LMI_STATUS__CENC_READ_CLEAN_MASK 0x00400000L +//UVD_LMI_PERFMON_CTRL +#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0 +#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8 +#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L +#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L +//UVD_LMI_PERFMON_COUNT_LO +#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0 +#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL +//UVD_LMI_PERFMON_COUNT_HI +#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0 +#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL +//UVD_LMI_ADP_SWAP_CNTL +#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP__SHIFT 0x6 +#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x8 +#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP__SHIFT 0xa +#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP__SHIFT 0xc +#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP__SHIFT 0xe +#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP__SHIFT 0x10 +#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP__SHIFT 0x12 +#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP__SHIFT 0x14 +#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP__SHIFT 0x18 +#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP__SHIFT 0x1c +#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP__SHIFT 0x1e +#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP_MASK 0x000000C0L +#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000300L +#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP_MASK 0x00000C00L +#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP_MASK 0x00003000L +#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP_MASK 0x0000C000L +#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP_MASK 0x00030000L +#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP_MASK 0x000C0000L +#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP_MASK 0x00300000L +#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP_MASK 0x03000000L +#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L +#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP_MASK 0xC0000000L +//UVD_LMI_RBC_RB_VMID +#define UVD_LMI_RBC_RB_VMID__RB_VMID__SHIFT 0x0 +#define UVD_LMI_RBC_RB_VMID__RB_VMID_MASK 0x0000000FL +//UVD_LMI_RBC_IB_VMID +#define UVD_LMI_RBC_IB_VMID__IB_VMID__SHIFT 0x0 +#define UVD_LMI_RBC_IB_VMID__IB_VMID_MASK 0x0000000FL +//UVD_LMI_MC_CREDITS +#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS__SHIFT 0x0 +#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS__SHIFT 0x8 +#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS__SHIFT 0x10 +#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS__SHIFT 0x18 +#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS_MASK 0x0000003FL +#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS_MASK 0x00003F00L +#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS_MASK 0x003F0000L +#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS_MASK 0x3F000000L +//UVD_LMI_ADP_IND_INDEX +#define UVD_LMI_ADP_IND_INDEX__INDEX__SHIFT 0x0 +#define 
UVD_LMI_ADP_IND_INDEX__INDEX_MASK 0x00001FFFL +//UVD_LMI_ADP_IND_DATA +#define UVD_LMI_ADP_IND_DATA__DATA__SHIFT 0x0 +#define UVD_LMI_ADP_IND_DATA__DATA_MASK 0xFFFFFFFFL +//UVD_LMI_ADP_PF_EN +#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN__SHIFT 0x0 +#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN__SHIFT 0x1 +#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN__SHIFT 0x2 +#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN_MASK 0x00000001L +#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN_MASK 0x00000002L +#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN_MASK 0x00000004L +//UVD_LMI_PREF_CTRL +#define UVD_LMI_PREF_CTRL__PREF_RST__SHIFT 0x0 +#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS__SHIFT 0x1 +#define UVD_LMI_PREF_CTRL__PREF_WSTRB__SHIFT 0x2 +#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE__SHIFT 0x3 +#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE__SHIFT 0x4 +#define UVD_LMI_PREF_CTRL__PREF_SIZE__SHIFT 0x13 +#define UVD_LMI_PREF_CTRL__PREF_RST_MASK 0x00000001L +#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS_MASK 0x00000002L +#define UVD_LMI_PREF_CTRL__PREF_WSTRB_MASK 0x00000004L +#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE_MASK 0x00000008L +#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE_MASK 0x00000070L +#define UVD_LMI_PREF_CTRL__PREF_SIZE_MASK 0xFFF80000L +//UVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW +#define UVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH +#define UVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//VCN_RAS_CNTL +#define VCN_RAS_CNTL__VCPU_VCODEC_IH_EN__SHIFT 0x0 +#define VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN__SHIFT 0x4 +#define VCN_RAS_CNTL__VCPU_VCODEC_REARM__SHIFT 0x8 +#define VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN__SHIFT 0xc +#define VCN_RAS_CNTL__VCPU_VCODEC_READY__SHIFT 0x10 +#define VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK 0x00000001L +#define VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK 0x00000010L +#define VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK 0x00000100L +#define VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK 0x00001000L +#define VCN_RAS_CNTL__VCPU_VCODEC_READY_MASK 0x00010000L + + +// addressBlock: aid_uvd0_uvd_jpeg0_jpegnpdec +//UVD_JPEG_CNTL +#define UVD_JPEG_CNTL__REQUEST_EN__SHIFT 0x1 +#define UVD_JPEG_CNTL__ERR_RST_EN__SHIFT 0x2 +#define UVD_JPEG_CNTL__DBG_MUX_SEL__SHIFT 0x8 +#define UVD_JPEG_CNTL__FORMAT_CONV_EN__SHIFT 0x10 +#define UVD_JPEG_CNTL__VUP_MODE__SHIFT 0x11 +#define UVD_JPEG_CNTL__FC_TIMEOUT_EN__SHIFT 0x12 +#define UVD_JPEG_CNTL__ROI_CROP_EN__SHIFT 0x18 +#define UVD_JPEG_CNTL__ROI_CROP_EARLY_DECODE_STOP_DIS__SHIFT 0x19 +#define UVD_JPEG_CNTL__REQUEST_EN_MASK 0x00000002L +#define UVD_JPEG_CNTL__ERR_RST_EN_MASK 0x00000004L +#define UVD_JPEG_CNTL__DBG_MUX_SEL_MASK 0x00007F00L +#define UVD_JPEG_CNTL__FORMAT_CONV_EN_MASK 0x00010000L +#define UVD_JPEG_CNTL__VUP_MODE_MASK 0x00020000L +#define UVD_JPEG_CNTL__FC_TIMEOUT_EN_MASK 0x00040000L +#define UVD_JPEG_CNTL__ROI_CROP_EN_MASK 0x01000000L +#define UVD_JPEG_CNTL__ROI_CROP_EARLY_DECODE_STOP_DIS_MASK 0x02000000L +//UVD_JPEG_RB_BASE +#define UVD_JPEG_RB_BASE__RB_BYTE_OFF__SHIFT 0x0 +#define UVD_JPEG_RB_BASE__RB_BASE__SHIFT 0x6 +#define UVD_JPEG_RB_BASE__RB_BYTE_OFF_MASK 0x0000003FL +#define UVD_JPEG_RB_BASE__RB_BASE_MASK 0xFFFFFFC0L +//UVD_JPEG_RB_WPTR +#define UVD_JPEG_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JPEG_RB_WPTR__RB_WPTR_MASK 0x3FFFFFF0L +//UVD_JPEG_RB_RPTR +#define UVD_JPEG_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JPEG_RB_RPTR__RB_RPTR_MASK 0x3FFFFFF0L 
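+/* Note on UVD_JPEG_RB_WPTR/UVD_JPEG_RB_RPTR above: both fields start at bit 4,
+ * so ring positions are 16-byte aligned and the masked register value is the
+ * byte offset directly. Hedged sketch; 'wptr_val' and 'rptr_val' stand in for
+ * raw reads of the two registers:
+ *
+ *   uint32_t wptr_bytes = wptr_val & UVD_JPEG_RB_WPTR__RB_WPTR_MASK;
+ *   uint32_t rptr_bytes = rptr_val & UVD_JPEG_RB_RPTR__RB_RPTR_MASK;
+ */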
+//UVD_JPEG_RB_SIZE +#define UVD_JPEG_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JPEG_RB_SIZE__RB_SIZE_MASK 0x3FFFFFF0L +//UVD_JPEG_DEC_CNT +#define UVD_JPEG_DEC_CNT__DECODE_COUNT__SHIFT 0x0 +#define UVD_JPEG_DEC_CNT__DECODE_COUNT_MASK 0xFFFFFFFFL +//UVD_JPEG_SPS_INFO +#define UVD_JPEG_SPS_INFO__PIC_WIDTH__SHIFT 0x0 +#define UVD_JPEG_SPS_INFO__PIC_HEIGHT__SHIFT 0x10 +#define UVD_JPEG_SPS_INFO__PIC_WIDTH_MASK 0x0000FFFFL +#define UVD_JPEG_SPS_INFO__PIC_HEIGHT_MASK 0xFFFF0000L +//UVD_JPEG_SPS1_INFO +#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC__SHIFT 0x0 +#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT__SHIFT 0x3 +#define UVD_JPEG_SPS1_INFO__OUT_FMT_422__SHIFT 0x4 +#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC_MASK 0x00000007L +#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT_MASK 0x00000008L +#define UVD_JPEG_SPS1_INFO__OUT_FMT_422_MASK 0x00000010L +//UVD_JPEG_RE_TIMER +#define UVD_JPEG_RE_TIMER__TIMER_OUT__SHIFT 0x0 +#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN__SHIFT 0x10 +#define UVD_JPEG_RE_TIMER__TIMER_OUT_MASK 0x000000FFL +#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN_MASK 0x00010000L +//UVD_JPEG_DEC_SCRATCH0 +#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL +//UVD_JPEG_INT_EN +#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN__SHIFT 0x0 +#define UVD_JPEG_INT_EN__JOB_AVAIL_EN__SHIFT 0x1 +#define UVD_JPEG_INT_EN__FENCE_VAL_EN__SHIFT 0x2 +#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN__SHIFT 0x6 +#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN__SHIFT 0x7 +#define UVD_JPEG_INT_EN__EOI_ERR_EN__SHIFT 0x8 +#define UVD_JPEG_INT_EN__HFM_ERR_EN__SHIFT 0x9 +#define UVD_JPEG_INT_EN__RST_ERR_EN__SHIFT 0xa +#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN__SHIFT 0xb +#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN__SHIFT 0xc +#define UVD_JPEG_INT_EN__MARKER_ERR_EN__SHIFT 0xd +#define UVD_JPEG_INT_EN__FMT_ERR_EN__SHIFT 0xe +#define UVD_JPEG_INT_EN__PROFILE_ERR_EN__SHIFT 0xf +#define UVD_JPEG_INT_EN__FC_TIMEOUT_ERR_EN__SHIFT 0x10 +#define UVD_JPEG_INT_EN__FC_FMT_ERR_EN__SHIFT 0x11 +#define UVD_JPEG_INT_EN__FC_SRC_ERR_EN__SHIFT 0x12 +#define UVD_JPEG_INT_EN__CROP_SIZE_ERR_EN__SHIFT 0x13 +#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN_MASK 0x00000001L +#define UVD_JPEG_INT_EN__JOB_AVAIL_EN_MASK 0x00000002L +#define UVD_JPEG_INT_EN__FENCE_VAL_EN_MASK 0x00000004L +#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN_MASK 0x00000040L +#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN_MASK 0x00000080L +#define UVD_JPEG_INT_EN__EOI_ERR_EN_MASK 0x00000100L +#define UVD_JPEG_INT_EN__HFM_ERR_EN_MASK 0x00000200L +#define UVD_JPEG_INT_EN__RST_ERR_EN_MASK 0x00000400L +#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN_MASK 0x00000800L +#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN_MASK 0x00001000L +#define UVD_JPEG_INT_EN__MARKER_ERR_EN_MASK 0x00002000L +#define UVD_JPEG_INT_EN__FMT_ERR_EN_MASK 0x00004000L +#define UVD_JPEG_INT_EN__PROFILE_ERR_EN_MASK 0x00008000L +#define UVD_JPEG_INT_EN__FC_TIMEOUT_ERR_EN_MASK 0x00010000L +#define UVD_JPEG_INT_EN__FC_FMT_ERR_EN_MASK 0x00020000L +#define UVD_JPEG_INT_EN__FC_SRC_ERR_EN_MASK 0x00040000L +#define UVD_JPEG_INT_EN__CROP_SIZE_ERR_EN_MASK 0x00080000L +//UVD_JPEG_INT_STAT +#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT__SHIFT 0x0 +#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT__SHIFT 0x1 +#define UVD_JPEG_INT_STAT__FENCE_VAL_INT__SHIFT 0x2 +#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT__SHIFT 0x6 +#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT__SHIFT 0x7 +#define UVD_JPEG_INT_STAT__EOI_ERR_INT__SHIFT 0x8 +#define UVD_JPEG_INT_STAT__HFM_ERR_INT__SHIFT 0x9 +#define 
UVD_JPEG_INT_STAT__RST_ERR_INT__SHIFT 0xa
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT__SHIFT 0xb
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT__SHIFT 0xc
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT__SHIFT 0xd
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT__SHIFT 0xe
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT__SHIFT 0xf
+#define UVD_JPEG_INT_STAT__FC_TIMEOUT_ERR_INT__SHIFT 0x10
+#define UVD_JPEG_INT_STAT__FC_FMT_ERR_INT__SHIFT 0x11
+#define UVD_JPEG_INT_STAT__FC_SRC_ERR_INT__SHIFT 0x12
+#define UVD_JPEG_INT_STAT__CROP_SIZE_ERR_INT__SHIFT 0x13
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT_MASK 0x00000001L
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT_MASK 0x00000002L
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT_MASK 0x00000004L
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT_MASK 0x00000040L
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT_MASK 0x00000080L
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT_MASK 0x00000100L
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT_MASK 0x00000200L
+#define UVD_JPEG_INT_STAT__RST_ERR_INT_MASK 0x00000400L
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT_MASK 0x00000800L
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT_MASK 0x00001000L
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT_MASK 0x00002000L
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT_MASK 0x00004000L
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT_MASK 0x00008000L
+#define UVD_JPEG_INT_STAT__FC_TIMEOUT_ERR_INT_MASK 0x00010000L
+#define UVD_JPEG_INT_STAT__FC_FMT_ERR_INT_MASK 0x00020000L
+#define UVD_JPEG_INT_STAT__FC_SRC_ERR_INT_MASK 0x00040000L
+#define UVD_JPEG_INT_STAT__CROP_SIZE_ERR_INT_MASK 0x00080000L
+//UVD_JPEG_TIER_CNTL0
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC__SHIFT 0x8
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC__SHIFT 0xb
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC__SHIFT 0x11
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC__SHIFT 0x14
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC__SHIFT 0x17
+#define UVD_JPEG_TIER_CNTL0__Y_TQ__SHIFT 0x1a
+#define UVD_JPEG_TIER_CNTL0__U_TQ__SHIFT 0x1c
+#define UVD_JPEG_TIER_CNTL0__V_TQ__SHIFT 0x1e
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL_MASK 0x00000003L
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID_MASK 0x000000C0L
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC_MASK 0x00000700L
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC_MASK 0x00003800L
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC_MASK 0x0001C000L
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC_MASK 0x000E0000L
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC_MASK 0x00700000L
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC_MASK 0x03800000L
+#define UVD_JPEG_TIER_CNTL0__Y_TQ_MASK 0x0C000000L
+#define UVD_JPEG_TIER_CNTL0__U_TQ_MASK 0x30000000L
+#define UVD_JPEG_TIER_CNTL0__V_TQ_MASK 0xC0000000L
+//UVD_JPEG_TIER_CNTL1
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_CNTL2
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE__SHIFT 0x1
+#define UVD_JPEG_TIER_CNTL2__TQ__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL2__TH__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL2__TC__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL2__TD__SHIFT 0x7
+#define UVD_JPEG_TIER_CNTL2__TA__SHIFT 0xa
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL_MASK 0x00000001L
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE_MASK 0x00000002L
+#define UVD_JPEG_TIER_CNTL2__TQ_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL2__TH_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL2__TC_MASK 0x00000040L
+#define UVD_JPEG_TIER_CNTL2__TD_MASK 0x00000380L
+#define UVD_JPEG_TIER_CNTL2__TA_MASK 0x00001C00L
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN_MASK 0x00004000L
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_STATUS
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE__SHIFT 0x0
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE__SHIFT 0x1
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE_MASK 0x00000001L
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE_MASK 0x00000002L
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_sclk0_jpegnpsclkdec
+//UVD_JPEG_OUTBUF_CNTL
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN__SHIFT 0x2
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX__SHIFT 0x6
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT__SHIFT 0x7
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER__SHIFT 0x9
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK__SHIFT 0x10
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT_MASK 0x00000003L
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN_MASK 0x00000004L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX_MASK 0x00000040L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT_MASK 0x00000180L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER_MASK 0x00001E00L
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK_MASK 0x00010000L
+//UVD_JPEG_OUTBUF_WPTR
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_OUTBUF_RPTR
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_PITCH
+#define UVD_JPEG_PITCH__PITCH__SHIFT 0x0
+#define UVD_JPEG_PITCH__PITCH_MASK 0xFFFFFFFFL
+//UVD_JPEG_UV_PITCH
+#define UVD_JPEG_UV_PITCH__UV_PITCH__SHIFT 0x0
+#define UVD_JPEG_UV_PITCH__UV_PITCH_MASK 0xFFFFFFFFL
+//JPEG_DEC_Y_GFX8_TILING_SURFACE
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_UV_GFX8_TILING_SURFACE
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_GFX8_ADDR_CONFIG
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//JPEG_DEC_Y_GFX10_TILING_SURFACE
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_UV_GFX10_TILING_SURFACE
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_GFX10_ADDR_CONFIG
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//JPEG_DEC_ADDR_MODE
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L
+//UVD_JPEG_OUTPUT_XY
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X__SHIFT 0x0
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y__SHIFT 0x10
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X_MASK 0x00003FFFL
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y_MASK 0x3FFF0000L
+//UVD_JPEG_GPCOM_CMD
+#define UVD_JPEG_GPCOM_CMD__CMD__SHIFT 0x1
+#define UVD_JPEG_GPCOM_CMD__CMD_MASK 0x0000000EL
+//UVD_JPEG_GPCOM_DATA0
+#define UVD_JPEG_GPCOM_DATA0__DATA0__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_GPCOM_DATA1
+#define UVD_JPEG_GPCOM_DATA1__DATA1__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_JPEG_SCRATCH1
+#define UVD_JPEG_SCRATCH1__SCRATCH1__SHIFT 0x0
+#define UVD_JPEG_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL
+//UVD_JPEG_DEC_SOFT_RST
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET__SHIFT 0x0
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS__SHIFT 0x10
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET_MASK 0x00000001L
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS_MASK 0x00010000L
+
+
+// addressBlock: aid_uvd0_uvd_jrbc0_uvd_jrbc_dec
+//UVD_JRBC0_UVD_JRBC_RB_WPTR
+#define UVD_JRBC0_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC0_UVD_JRBC_RB_CNTL
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC0_UVD_JRBC_IB_SIZE
+#define UVD_JRBC0_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC0_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC0_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC0_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC0_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC0_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC0_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC0_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC0_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC0_UVD_JRBC_STATUS
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC0_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC0_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC0_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC0_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC0_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC0_UVD_JRBC_RB_RPTR
+#define UVD_JRBC0_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC0_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC0_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC0_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC0_UVD_JRBC_RB_SIZE
+#define UVD_JRBC0_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC0_UVD_JRBC_SCRATCH0
+#define UVD_JRBC0_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jmi0_uvd_jmi_dec
+//UVD_JMI0_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI0_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI0_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI0_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI0_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI0_UVD_LMI_JRBC_CTRL
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI0_UVD_LMI_JPEG_CTRL
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI0_JPEG_LMI_DROP
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI0_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI0_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI0_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI0_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI0_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI0_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI0_UVD_LMI_JPEG_VMID
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI0_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: aid_uvd0_uvd_jmi_common_dec
+//UVD_JADP_MCIF_URGENT_CTRL
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK__SHIFT 0x0
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK__SHIFT 0x6
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER__SHIFT 0xb
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP__SHIFT 0x11
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP__SHIFT 0x15
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN__SHIFT 0x19
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN__SHIFT 0x1a
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK_MASK 0x0000003FL
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK_MASK 0x000007C0L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER_MASK 0x0001F800L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP_MASK 0x001E0000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP_MASK 0x01E00000L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN_MASK 0x02000000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN_MASK 0x04000000L
+//UVD_JMI_URGENT_CTRL
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x4
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x10
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0x14
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x000000F0L
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00010000L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00F00000L
+//UVD_JMI_CTRL
+#define UVD_JMI_CTRL__STALL_MC_ARB__SHIFT 0x0
+#define UVD_JMI_CTRL__MASK_MC_URGENT__SHIFT 0x1
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x2
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER__SHIFT 0x8
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER__SHIFT 0x10
+#define UVD_JMI_CTRL__STALL_MC_ARB_MASK 0x00000001L
+#define UVD_JMI_CTRL__MASK_MC_URGENT_MASK 0x00000002L
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000004L
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER_MASK 0x0000FF00L
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER_MASK 0x00FF0000L
+//JPEG_MEMCHECK_CLAMPING_CNTL
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN_MASK 0x00000001L
+//JPEG_MEMCHECK_SAFE_ADDR
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR_MASK 0xFFFFFFFFL
+//JPEG_MEMCHECK_SAFE_ADDR_64BIT
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT_MASK 0xFFFFFFFFL
+//UVD_JMI_LAT_CTRL
+#define UVD_JMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_JMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_JMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_JMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_JMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_JMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_JMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_JMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_JMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_JMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_JMI_LAT_CNTR
+#define UVD_JMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_JMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_JMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_JMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_JMI_AVG_LAT_CNTR
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_JMI_PERFMON_CTRL
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_JMI_PERFMON_COUNT_LO
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_JMI_PERFMON_COUNT_HI
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_JMI_CLEAN_STATUS
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN__SHIFT 0x0
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW__SHIFT 0x1
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN__SHIFT 0x2
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW__SHIFT 0x3
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING__SHIFT 0x4
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN__SHIFT 0x8
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE1_READ_CLEAN__SHIFT 0x9
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE2_READ_CLEAN__SHIFT 0xa
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE3_READ_CLEAN__SHIFT 0xb
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE4_READ_CLEAN__SHIFT 0xc
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE5_READ_CLEAN__SHIFT 0xd
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE6_READ_CLEAN__SHIFT 0xe
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE7_READ_CLEAN__SHIFT 0xf
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN__SHIFT 0x10
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE1_WRITE_CLEAN__SHIFT 0x11
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE2_WRITE_CLEAN__SHIFT 0x12
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE3_WRITE_CLEAN__SHIFT 0x13
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE4_WRITE_CLEAN__SHIFT 0x14
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE5_WRITE_CLEAN__SHIFT 0x15
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE6_WRITE_CLEAN__SHIFT 0x16
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE7_WRITE_CLEAN__SHIFT 0x17
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_MASK 0x00000001L
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW_MASK 0x00000002L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_MASK 0x00000004L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW_MASK 0x00000008L
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING_MASK 0x00000010L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN_MASK 0x00000100L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE1_READ_CLEAN_MASK 0x00000200L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE2_READ_CLEAN_MASK 0x00000400L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE3_READ_CLEAN_MASK 0x00000800L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE4_READ_CLEAN_MASK 0x00001000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE5_READ_CLEAN_MASK 0x00002000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE6_READ_CLEAN_MASK 0x00004000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE7_READ_CLEAN_MASK 0x00008000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN_MASK 0x00010000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE1_WRITE_CLEAN_MASK 0x00020000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE2_WRITE_CLEAN_MASK 0x00040000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE3_WRITE_CLEAN_MASK 0x00080000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE4_WRITE_CLEAN_MASK 0x00100000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE5_WRITE_CLEAN_MASK 0x00200000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE6_WRITE_CLEAN_MASK 0x00400000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE7_WRITE_CLEAN_MASK 0x00800000L
+//UVD_JMI_CNTL
+#define UVD_JMI_CNTL__SOFT_RESET__SHIFT 0x0
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX__SHIFT 0x8
+#define UVD_JMI_CNTL__SOFT_RESET_MASK 0x00000001L
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX_MASK 0x0003FF00L
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_common_dec
+//JPEG_SOFT_RESET_STATUS
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS__SHIFT 0x0
+#define JPEG_SOFT_RESET_STATUS__JPEG1_DEC_RESET_STATUS__SHIFT 0x1
+#define JPEG_SOFT_RESET_STATUS__JPEG2_DEC_RESET_STATUS__SHIFT 0x2
+#define JPEG_SOFT_RESET_STATUS__JPEG3_DEC_RESET_STATUS__SHIFT 0x3
+#define JPEG_SOFT_RESET_STATUS__JPEG4_DEC_RESET_STATUS__SHIFT 0x4
+#define JPEG_SOFT_RESET_STATUS__JPEG5_DEC_RESET_STATUS__SHIFT 0x5
+#define JPEG_SOFT_RESET_STATUS__JPEG6_DEC_RESET_STATUS__SHIFT 0x6
+#define JPEG_SOFT_RESET_STATUS__JPEG7_DEC_RESET_STATUS__SHIFT 0x7
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS__SHIFT 0x8
+#define JPEG_SOFT_RESET_STATUS__DJRBC1_RESET_STATUS__SHIFT 0x9
+#define JPEG_SOFT_RESET_STATUS__DJRBC2_RESET_STATUS__SHIFT 0xa
+#define JPEG_SOFT_RESET_STATUS__DJRBC3_RESET_STATUS__SHIFT 0xb
+#define JPEG_SOFT_RESET_STATUS__DJRBC4_RESET_STATUS__SHIFT 0xc
+#define JPEG_SOFT_RESET_STATUS__DJRBC5_RESET_STATUS__SHIFT 0xd
+#define JPEG_SOFT_RESET_STATUS__DJRBC6_RESET_STATUS__SHIFT 0xe
+#define JPEG_SOFT_RESET_STATUS__DJRBC7_RESET_STATUS__SHIFT 0xf
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS__SHIFT 0x11
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS__SHIFT 0x12
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS__SHIFT 0x18
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS_MASK 0x00000001L
+#define JPEG_SOFT_RESET_STATUS__JPEG1_DEC_RESET_STATUS_MASK 0x00000002L
+#define JPEG_SOFT_RESET_STATUS__JPEG2_DEC_RESET_STATUS_MASK 0x00000004L
+#define JPEG_SOFT_RESET_STATUS__JPEG3_DEC_RESET_STATUS_MASK 0x00000008L
+#define JPEG_SOFT_RESET_STATUS__JPEG4_DEC_RESET_STATUS_MASK 0x00000010L
+#define JPEG_SOFT_RESET_STATUS__JPEG5_DEC_RESET_STATUS_MASK 0x00000020L
+#define JPEG_SOFT_RESET_STATUS__JPEG6_DEC_RESET_STATUS_MASK 0x00000040L
+#define JPEG_SOFT_RESET_STATUS__JPEG7_DEC_RESET_STATUS_MASK 0x00000080L
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS_MASK 0x00000100L
+#define JPEG_SOFT_RESET_STATUS__DJRBC1_RESET_STATUS_MASK 0x00000200L
+#define JPEG_SOFT_RESET_STATUS__DJRBC2_RESET_STATUS_MASK 0x00000400L
+#define JPEG_SOFT_RESET_STATUS__DJRBC3_RESET_STATUS_MASK 0x00000800L
+#define JPEG_SOFT_RESET_STATUS__DJRBC4_RESET_STATUS_MASK 0x00001000L
+#define JPEG_SOFT_RESET_STATUS__DJRBC5_RESET_STATUS_MASK 0x00002000L
+#define JPEG_SOFT_RESET_STATUS__DJRBC6_RESET_STATUS_MASK 0x00004000L
+#define JPEG_SOFT_RESET_STATUS__DJRBC7_RESET_STATUS_MASK 0x00008000L
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS_MASK 0x00020000L
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS_MASK 0x00040000L
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS_MASK 0x01000000L
+//JPEG_SYS_INT_EN
+#define JPEG_SYS_INT_EN__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_EN__DJPEG1_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_EN__DJPEG2_CORE__SHIFT 0x2
+#define JPEG_SYS_INT_EN__DJPEG3_CORE__SHIFT 0x3
+#define JPEG_SYS_INT_EN__DJPEG4_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_EN__DJPEG5_CORE__SHIFT 0x5
+#define JPEG_SYS_INT_EN__DJPEG6_CORE__SHIFT 0x6
+#define JPEG_SYS_INT_EN__DJPEG7_CORE__SHIFT 0x7
+#define JPEG_SYS_INT_EN__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_EN__DJRBC1__SHIFT 0x9
+#define JPEG_SYS_INT_EN__DJRBC2__SHIFT 0xa
+#define JPEG_SYS_INT_EN__DJRBC3__SHIFT 0xb
+#define JPEG_SYS_INT_EN__DJRBC4__SHIFT 0xc
+#define JPEG_SYS_INT_EN__DJRBC5__SHIFT 0xd
+#define JPEG_SYS_INT_EN__DJRBC6__SHIFT 0xe
+#define JPEG_SYS_INT_EN__DJRBC7__SHIFT 0xf
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_EN__DJPEG1_PF_RPT__SHIFT 0x11
+#define JPEG_SYS_INT_EN__DJPEG2_PF_RPT__SHIFT 0x12
+#define JPEG_SYS_INT_EN__DJPEG3_PF_RPT__SHIFT 0x13
+#define JPEG_SYS_INT_EN__DJPEG4_PF_RPT__SHIFT 0x14
+#define JPEG_SYS_INT_EN__DJPEG5_PF_RPT__SHIFT 0x15
+#define JPEG_SYS_INT_EN__DJPEG6_PF_RPT__SHIFT 0x16
+#define JPEG_SYS_INT_EN__DJPEG7_PF_RPT__SHIFT 0x17
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_EN__DJPEG1_RAS_CNTL__SHIFT 0x19
+#define JPEG_SYS_INT_EN__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_EN__DJPEG1_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_EN__DJPEG2_CORE_MASK 0x00000004L
+#define JPEG_SYS_INT_EN__DJPEG3_CORE_MASK 0x00000008L
+#define JPEG_SYS_INT_EN__DJPEG4_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_EN__DJPEG5_CORE_MASK 0x00000020L
+#define JPEG_SYS_INT_EN__DJPEG6_CORE_MASK 0x00000040L
+#define JPEG_SYS_INT_EN__DJPEG7_CORE_MASK 0x00000080L
+#define JPEG_SYS_INT_EN__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_EN__DJRBC1_MASK 0x00000200L
+#define JPEG_SYS_INT_EN__DJRBC2_MASK 0x00000400L
+#define JPEG_SYS_INT_EN__DJRBC3_MASK 0x00000800L
+#define JPEG_SYS_INT_EN__DJRBC4_MASK 0x00001000L
+#define JPEG_SYS_INT_EN__DJRBC5_MASK 0x00002000L
+#define JPEG_SYS_INT_EN__DJRBC6_MASK 0x00004000L
+#define JPEG_SYS_INT_EN__DJRBC7_MASK 0x00008000L
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_EN__DJPEG1_PF_RPT_MASK 0x00020000L
+#define JPEG_SYS_INT_EN__DJPEG2_PF_RPT_MASK 0x00040000L
+#define JPEG_SYS_INT_EN__DJPEG3_PF_RPT_MASK 0x00080000L
+#define JPEG_SYS_INT_EN__DJPEG4_PF_RPT_MASK 0x00100000L
+#define JPEG_SYS_INT_EN__DJPEG5_PF_RPT_MASK 0x00200000L
+#define JPEG_SYS_INT_EN__DJPEG6_PF_RPT_MASK 0x00400000L
+#define JPEG_SYS_INT_EN__DJPEG7_PF_RPT_MASK 0x00800000L
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL_MASK 0x01000000L
+#define JPEG_SYS_INT_EN__DJPEG1_RAS_CNTL_MASK 0x02000000L
+//JPEG_SYS_INT_EN1
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_EN1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_EN1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_EN1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_EN1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_STATUS
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS__DJPEG1_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_STATUS__DJPEG2_CORE__SHIFT 0x2
+#define JPEG_SYS_INT_STATUS__DJPEG3_CORE__SHIFT 0x3
+#define JPEG_SYS_INT_STATUS__DJPEG4_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_STATUS__DJPEG5_CORE__SHIFT 0x5
+#define JPEG_SYS_INT_STATUS__DJPEG6_CORE__SHIFT 0x6
+#define JPEG_SYS_INT_STATUS__DJPEG7_CORE__SHIFT 0x7
+#define JPEG_SYS_INT_STATUS__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_STATUS__DJRBC1__SHIFT 0x9
+#define JPEG_SYS_INT_STATUS__DJRBC2__SHIFT 0xa
+#define JPEG_SYS_INT_STATUS__DJRBC3__SHIFT 0xb
+#define JPEG_SYS_INT_STATUS__DJRBC4__SHIFT 0xc
+#define JPEG_SYS_INT_STATUS__DJRBC5__SHIFT 0xd
+#define JPEG_SYS_INT_STATUS__DJRBC6__SHIFT 0xe
+#define JPEG_SYS_INT_STATUS__DJRBC7__SHIFT 0xf
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_STATUS__DJPEG1_PF_RPT__SHIFT 0x11
+#define JPEG_SYS_INT_STATUS__DJPEG2_PF_RPT__SHIFT 0x12
+#define JPEG_SYS_INT_STATUS__DJPEG3_PF_RPT__SHIFT 0x13
+#define JPEG_SYS_INT_STATUS__DJPEG4_PF_RPT__SHIFT 0x14
+#define JPEG_SYS_INT_STATUS__DJPEG5_PF_RPT__SHIFT 0x15
+#define JPEG_SYS_INT_STATUS__DJPEG6_PF_RPT__SHIFT 0x16
+#define JPEG_SYS_INT_STATUS__DJPEG7_PF_RPT__SHIFT 0x17
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_STATUS__DJPEG1_RAS_CNTL__SHIFT 0x19
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS__DJPEG1_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_STATUS__DJPEG2_CORE_MASK 0x00000004L
+#define JPEG_SYS_INT_STATUS__DJPEG3_CORE_MASK 0x00000008L
+#define JPEG_SYS_INT_STATUS__DJPEG4_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_STATUS__DJPEG5_CORE_MASK 0x00000020L
+#define JPEG_SYS_INT_STATUS__DJPEG6_CORE_MASK 0x00000040L
+#define JPEG_SYS_INT_STATUS__DJPEG7_CORE_MASK 0x00000080L
+#define JPEG_SYS_INT_STATUS__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_STATUS__DJRBC1_MASK 0x00000200L
+#define JPEG_SYS_INT_STATUS__DJRBC2_MASK 0x00000400L
+#define JPEG_SYS_INT_STATUS__DJRBC3_MASK 0x00000800L
+#define JPEG_SYS_INT_STATUS__DJRBC4_MASK 0x00001000L
+#define JPEG_SYS_INT_STATUS__DJRBC5_MASK 0x00002000L
+#define JPEG_SYS_INT_STATUS__DJRBC6_MASK 0x00004000L
+#define JPEG_SYS_INT_STATUS__DJRBC7_MASK 0x00008000L
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_STATUS__DJPEG1_PF_RPT_MASK 0x00020000L
+#define JPEG_SYS_INT_STATUS__DJPEG2_PF_RPT_MASK 0x00040000L
+#define JPEG_SYS_INT_STATUS__DJPEG3_PF_RPT_MASK 0x00080000L
+#define JPEG_SYS_INT_STATUS__DJPEG4_PF_RPT_MASK 0x00100000L
+#define JPEG_SYS_INT_STATUS__DJPEG5_PF_RPT_MASK 0x00200000L
+#define JPEG_SYS_INT_STATUS__DJPEG6_PF_RPT_MASK 0x00400000L
+#define JPEG_SYS_INT_STATUS__DJPEG7_PF_RPT_MASK 0x00800000L
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL_MASK 0x01000000L
+#define JPEG_SYS_INT_STATUS__DJPEG1_RAS_CNTL_MASK 0x02000000L
+//JPEG_SYS_INT_STATUS1
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_STATUS1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_STATUS1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_ACK
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_ACK__DJPEG1_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_ACK__DJPEG2_CORE__SHIFT 0x2
+#define JPEG_SYS_INT_ACK__DJPEG3_CORE__SHIFT 0x3
+#define JPEG_SYS_INT_ACK__DJPEG4_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_ACK__DJPEG5_CORE__SHIFT 0x5
+#define JPEG_SYS_INT_ACK__DJPEG6_CORE__SHIFT 0x6
+#define JPEG_SYS_INT_ACK__DJPEG7_CORE__SHIFT 0x7
+#define JPEG_SYS_INT_ACK__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_ACK__DJRBC1__SHIFT 0x9
+#define JPEG_SYS_INT_ACK__DJRBC2__SHIFT 0xa
+#define JPEG_SYS_INT_ACK__DJRBC3__SHIFT 0xb
+#define JPEG_SYS_INT_ACK__DJRBC4__SHIFT 0xc
+#define JPEG_SYS_INT_ACK__DJRBC5__SHIFT 0xd
+#define JPEG_SYS_INT_ACK__DJRBC6__SHIFT 0xe
+#define JPEG_SYS_INT_ACK__DJRBC7__SHIFT 0xf
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_ACK__DJPEG1_PF_RPT__SHIFT 0x11
+#define JPEG_SYS_INT_ACK__DJPEG2_PF_RPT__SHIFT 0x12
+#define JPEG_SYS_INT_ACK__DJPEG3_PF_RPT__SHIFT 0x13
+#define JPEG_SYS_INT_ACK__DJPEG4_PF_RPT__SHIFT 0x14
+#define JPEG_SYS_INT_ACK__DJPEG5_PF_RPT__SHIFT 0x15
+#define JPEG_SYS_INT_ACK__DJPEG6_PF_RPT__SHIFT 0x16
+#define JPEG_SYS_INT_ACK__DJPEG7_PF_RPT__SHIFT 0x17
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_ACK__DJPEG1_RAS_CNTL__SHIFT 0x19
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK__DJPEG1_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_ACK__DJPEG2_CORE_MASK 0x00000004L
+#define JPEG_SYS_INT_ACK__DJPEG3_CORE_MASK 0x00000008L
+#define JPEG_SYS_INT_ACK__DJPEG4_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_ACK__DJPEG5_CORE_MASK 0x00000020L
+#define JPEG_SYS_INT_ACK__DJPEG6_CORE_MASK 0x00000040L
+#define JPEG_SYS_INT_ACK__DJPEG7_CORE_MASK 0x00000080L
+#define JPEG_SYS_INT_ACK__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_ACK__DJRBC1_MASK 0x00000200L
+#define JPEG_SYS_INT_ACK__DJRBC2_MASK 0x00000400L
+#define JPEG_SYS_INT_ACK__DJRBC3_MASK 0x00000800L
+#define JPEG_SYS_INT_ACK__DJRBC4_MASK 0x00001000L
+#define JPEG_SYS_INT_ACK__DJRBC5_MASK 0x00002000L
+#define JPEG_SYS_INT_ACK__DJRBC6_MASK 0x00004000L
+#define JPEG_SYS_INT_ACK__DJRBC7_MASK 0x00008000L
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_ACK__DJPEG1_PF_RPT_MASK 0x00020000L
+#define JPEG_SYS_INT_ACK__DJPEG2_PF_RPT_MASK 0x00040000L
+#define JPEG_SYS_INT_ACK__DJPEG3_PF_RPT_MASK 0x00080000L
+#define JPEG_SYS_INT_ACK__DJPEG4_PF_RPT_MASK 0x00100000L
+#define JPEG_SYS_INT_ACK__DJPEG5_PF_RPT_MASK 0x00200000L
+#define JPEG_SYS_INT_ACK__DJPEG6_PF_RPT_MASK 0x00400000L
+#define JPEG_SYS_INT_ACK__DJPEG7_PF_RPT_MASK 0x00800000L
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL_MASK 0x01000000L
+#define JPEG_SYS_INT_ACK__DJPEG1_RAS_CNTL_MASK 0x02000000L
+//JPEG_SYS_INT_ACK1
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_ACK1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_ACK1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_MEMCHECK_SYS_INT_EN
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC1_RD_ERR_EN__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC2_RD_ERR_EN__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC3_RD_ERR_EN__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC4_RD_ERR_EN__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC5_RD_ERR_EN__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC6_RD_ERR_EN__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC7_RD_ERR_EN__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH1_RD_ERR_EN__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH2_RD_ERR_EN__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH3_RD_ERR_EN__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH4_RD_ERR_EN__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH5_RD_ERR_EN__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH6_RD_ERR_EN__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH7_RD_ERR_EN__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC1_WR_ERR_EN__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC2_WR_ERR_EN__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC3_WR_ERR_EN__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC4_WR_ERR_EN__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC5_WR_ERR_EN__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC6_WR_ERR_EN__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC7_WR_ERR_EN__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF1_WR_ERR_EN__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF2_WR_ERR_EN__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF3_WR_ERR_EN__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF4_WR_ERR_EN__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF5_WR_ERR_EN__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF6_WR_ERR_EN__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF7_WR_ERR_EN__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC1_RD_ERR_EN_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC2_RD_ERR_EN_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC3_RD_ERR_EN_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC4_RD_ERR_EN_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC5_RD_ERR_EN_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC6_RD_ERR_EN_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC7_RD_ERR_EN_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH1_RD_ERR_EN_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH2_RD_ERR_EN_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH3_RD_ERR_EN_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH4_RD_ERR_EN_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH5_RD_ERR_EN_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH6_RD_ERR_EN_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH7_RD_ERR_EN_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC1_WR_ERR_EN_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC2_WR_ERR_EN_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC3_WR_ERR_EN_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC4_WR_ERR_EN_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC5_WR_ERR_EN_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC6_WR_ERR_EN_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC7_WR_ERR_EN_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF1_WR_ERR_EN_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF2_WR_ERR_EN_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF3_WR_ERR_EN_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF4_WR_ERR_EN_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF5_WR_ERR_EN_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF6_WR_ERR_EN_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF7_WR_ERR_EN_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_EN1
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN_MASK 0x00000020L
+//JPEG_MEMCHECK_SYS_INT_STAT
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH1_RD_HI_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH2_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH3_RD_HI_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH4_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH5_RD_HI_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH6_RD_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH7_RD_HI_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH1_RD_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH2_RD_LO_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH3_RD_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH4_RD_LO_ERR__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH5_RD_LO_ERR__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH6_RD_LO_ERR__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH7_RD_LO_ERR__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF1_WR_HI_ERR__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF2_WR_HI_ERR__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF3_WR_HI_ERR__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF4_WR_HI_ERR__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF5_WR_HI_ERR__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF6_WR_HI_ERR__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF7_WR_HI_ERR__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF1_WR_LO_ERR__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF2_WR_LO_ERR__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF3_WR_LO_ERR__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF4_WR_LO_ERR__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF5_WR_LO_ERR__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF6_WR_LO_ERR__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF7_WR_LO_ERR__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH1_RD_HI_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH2_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH3_RD_HI_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH4_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH5_RD_HI_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH6_RD_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH7_RD_HI_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH1_RD_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH2_RD_LO_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH3_RD_LO_ERR_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH4_RD_LO_ERR_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH5_RD_LO_ERR_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH6_RD_LO_ERR_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH7_RD_LO_ERR_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF1_WR_HI_ERR_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF2_WR_HI_ERR_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF3_WR_HI_ERR_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF4_WR_HI_ERR_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF5_WR_HI_ERR_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF6_WR_HI_ERR_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF7_WR_HI_ERR_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF1_WR_LO_ERR_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF2_WR_LO_ERR_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF3_WR_LO_ERR_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF4_WR_LO_ERR_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF5_WR_LO_ERR_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF6_WR_LO_ERR_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF7_WR_LO_ERR_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_STAT1
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_RD_HI_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_RD_HI_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_RD_HI_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_RD_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_RD_HI_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_RD_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_RD_LO_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_RD_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_RD_LO_ERR__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_RD_LO_ERR__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_RD_LO_ERR__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_RD_LO_ERR__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_WR_HI_ERR__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_WR_HI_ERR__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_WR_HI_ERR__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_WR_HI_ERR__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_WR_HI_ERR__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_WR_HI_ERR__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_WR_HI_ERR__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_WR_LO_ERR__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_WR_LO_ERR__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_WR_LO_ERR__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_WR_LO_ERR__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_WR_LO_ERR__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_WR_LO_ERR__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_WR_LO_ERR__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_RD_HI_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_RD_HI_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_RD_HI_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_RD_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_RD_HI_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_RD_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_RD_LO_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_RD_LO_ERR_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_RD_LO_ERR_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_RD_LO_ERR_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_RD_LO_ERR_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_RD_LO_ERR_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_WR_HI_ERR_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_WR_HI_ERR_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_WR_HI_ERR_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_WR_HI_ERR_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_WR_HI_ERR_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_WR_HI_ERR_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_WR_HI_ERR_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_WR_LO_ERR_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_WR_LO_ERR_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_WR_LO_ERR_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_WR_LO_ERR_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_WR_LO_ERR_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_WR_LO_ERR_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_WR_LO_ERR_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_STAT2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR_MASK 0x00000800L
+//JPEG_MEMCHECK_SYS_INT_ACK
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH1_RD_HI_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH2_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH3_RD_HI_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH4_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH5_RD_HI_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH6_RD_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH7_RD_HI_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH1_RD_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH2_RD_LO_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH3_RD_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH4_RD_LO_ERR__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH5_RD_LO_ERR__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH6_RD_LO_ERR__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH7_RD_LO_ERR__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF1_WR_HI_ERR__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF2_WR_HI_ERR__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF3_WR_HI_ERR__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF4_WR_HI_ERR__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF5_WR_HI_ERR__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF6_WR_HI_ERR__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF7_WR_HI_ERR__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF1_WR_LO_ERR__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF2_WR_LO_ERR__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF3_WR_LO_ERR__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF4_WR_LO_ERR__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF5_WR_LO_ERR__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF6_WR_LO_ERR__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF7_WR_LO_ERR__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH1_RD_HI_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH2_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH3_RD_HI_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH4_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH5_RD_HI_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH6_RD_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH7_RD_HI_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH1_RD_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH2_RD_LO_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH3_RD_LO_ERR_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH4_RD_LO_ERR_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH5_RD_LO_ERR_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH6_RD_LO_ERR_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH7_RD_LO_ERR_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF1_WR_HI_ERR_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF2_WR_HI_ERR_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF3_WR_HI_ERR_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF4_WR_HI_ERR_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF5_WR_HI_ERR_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF6_WR_HI_ERR_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF7_WR_HI_ERR_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF1_WR_LO_ERR_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF2_WR_LO_ERR_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF3_WR_LO_ERR_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF4_WR_LO_ERR_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF5_WR_LO_ERR_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF6_WR_LO_ERR_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF7_WR_LO_ERR_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_ACK1
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_RD_HI_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_RD_HI_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_RD_HI_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_RD_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_RD_HI_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_RD_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_RD_LO_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_RD_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_RD_LO_ERR__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_RD_LO_ERR__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_RD_LO_ERR__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_RD_LO_ERR__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_WR_HI_ERR__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_WR_HI_ERR__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_WR_HI_ERR__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_WR_HI_ERR__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_WR_HI_ERR__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_WR_HI_ERR__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_WR_HI_ERR__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_WR_LO_ERR__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_WR_LO_ERR__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_WR_LO_ERR__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_WR_LO_ERR__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_WR_LO_ERR__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_WR_LO_ERR__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_WR_LO_ERR__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_RD_HI_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_RD_HI_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_RD_HI_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_RD_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_RD_HI_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_RD_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_RD_LO_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_RD_LO_ERR_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_RD_LO_ERR_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_RD_LO_ERR_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_RD_LO_ERR_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_RD_LO_ERR_MASK 0x00008000L
+#define
JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR_MASK 0x00010000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_WR_HI_ERR_MASK 0x00020000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_WR_HI_ERR_MASK 0x00040000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_WR_HI_ERR_MASK 0x00080000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_WR_HI_ERR_MASK 0x00100000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_WR_HI_ERR_MASK 0x00200000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_WR_HI_ERR_MASK 0x00400000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_WR_HI_ERR_MASK 0x00800000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR_MASK 0x01000000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_WR_LO_ERR_MASK 0x02000000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_WR_LO_ERR_MASK 0x04000000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_WR_LO_ERR_MASK 0x08000000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_WR_LO_ERR_MASK 0x10000000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_WR_LO_ERR_MASK 0x20000000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_WR_LO_ERR_MASK 0x40000000L +#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_WR_LO_ERR_MASK 0x80000000L +//JPEG_MEMCHECK_SYS_INT_ACK2 +#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR__SHIFT 0x0 +#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR__SHIFT 0x1 +#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR__SHIFT 0x2 +#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR__SHIFT 0x3 +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR__SHIFT 0x4 +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR__SHIFT 0x5 +#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR__SHIFT 0x6 +#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR__SHIFT 0x7 +#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR__SHIFT 0x8 +#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR__SHIFT 0x9 +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR__SHIFT 0xa +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR__SHIFT 0xb +#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR_MASK 0x00000001L +#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR_MASK 0x00000002L +#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR_MASK 0x00000004L +#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR_MASK 0x00000008L +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR_MASK 0x00000010L +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR_MASK 0x00000020L +#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR_MASK 0x00000040L +#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR_MASK 0x00000080L +#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR_MASK 0x00000100L +#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR_MASK 0x00000200L +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR_MASK 0x00000400L +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR_MASK 0x00000800L +//JPEG_MASTINT_EN +#define JPEG_MASTINT_EN__OVERRUN_RST__SHIFT 0x0 +#define JPEG_MASTINT_EN__INT_OVERRUN__SHIFT 0x4 +#define JPEG_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L +#define JPEG_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L +//JPEG_IH_CTRL +#define JPEG_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0 +#define JPEG_IH_CTRL__IH_STALL_EN__SHIFT 0x1 +#define JPEG_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2 +#define JPEG_IH_CTRL__IH_VMID__SHIFT 0x3 +#define JPEG_IH_CTRL__IH_USER_DATA__SHIFT 0x7 +#define JPEG_IH_CTRL__IH_RINGID__SHIFT 0x13 +#define JPEG_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L +#define JPEG_IH_CTRL__IH_STALL_EN_MASK 0x00000002L +#define JPEG_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L +#define JPEG_IH_CTRL__IH_VMID_MASK 0x00000078L +#define JPEG_IH_CTRL__IH_USER_DATA_MASK 
0x0007FF80L +#define JPEG_IH_CTRL__IH_RINGID_MASK 0x07F80000L +//JRBBM_ARB_CTRL +#define JRBBM_ARB_CTRL__DJRBC0_DROP__SHIFT 0x0 +#define JRBBM_ARB_CTRL__DJRBC1_DROP__SHIFT 0x1 +#define JRBBM_ARB_CTRL__DJRBC2_DROP__SHIFT 0x2 +#define JRBBM_ARB_CTRL__DJRBC3_DROP__SHIFT 0x3 +#define JRBBM_ARB_CTRL__DJRBC4_DROP__SHIFT 0x4 +#define JRBBM_ARB_CTRL__DJRBC5_DROP__SHIFT 0x5 +#define JRBBM_ARB_CTRL__DJRBC6_DROP__SHIFT 0x6 +#define JRBBM_ARB_CTRL__DJRBC7_DROP__SHIFT 0x7 +#define JRBBM_ARB_CTRL__EJRBC_DROP__SHIFT 0x8 +#define JRBBM_ARB_CTRL__SRBM_DROP__SHIFT 0x9 +#define JRBBM_ARB_CTRL__DJRBC0_DROP_MASK 0x00000001L +#define JRBBM_ARB_CTRL__DJRBC1_DROP_MASK 0x00000002L +#define JRBBM_ARB_CTRL__DJRBC2_DROP_MASK 0x00000004L +#define JRBBM_ARB_CTRL__DJRBC3_DROP_MASK 0x00000008L +#define JRBBM_ARB_CTRL__DJRBC4_DROP_MASK 0x00000010L +#define JRBBM_ARB_CTRL__DJRBC5_DROP_MASK 0x00000020L +#define JRBBM_ARB_CTRL__DJRBC6_DROP_MASK 0x00000040L +#define JRBBM_ARB_CTRL__DJRBC7_DROP_MASK 0x00000080L +#define JRBBM_ARB_CTRL__EJRBC_DROP_MASK 0x00000100L +#define JRBBM_ARB_CTRL__SRBM_DROP_MASK 0x00000200L + + +// addressBlock: aid_uvd0_uvd_jpeg_common_sclk_dec +//JPEG_CGC_GATE +#define JPEG_CGC_GATE__JPEG0_DEC__SHIFT 0x0 +#define JPEG_CGC_GATE__JPEG1_DEC__SHIFT 0x1 +#define JPEG_CGC_GATE__JPEG2_DEC__SHIFT 0x2 +#define JPEG_CGC_GATE__JPEG3_DEC__SHIFT 0x3 +#define JPEG_CGC_GATE__JPEG4_DEC__SHIFT 0x4 +#define JPEG_CGC_GATE__JPEG5_DEC__SHIFT 0x5 +#define JPEG_CGC_GATE__JPEG6_DEC__SHIFT 0x6 +#define JPEG_CGC_GATE__JPEG7_DEC__SHIFT 0x7 +#define JPEG_CGC_GATE__JPEG_ENC__SHIFT 0x8 +#define JPEG_CGC_GATE__JMCIF__SHIFT 0x9 +#define JPEG_CGC_GATE__JRBBM__SHIFT 0xa +#define JPEG_CGC_GATE__JPEG0_DEC_MASK 0x00000001L +#define JPEG_CGC_GATE__JPEG1_DEC_MASK 0x00000002L +#define JPEG_CGC_GATE__JPEG2_DEC_MASK 0x00000004L +#define JPEG_CGC_GATE__JPEG3_DEC_MASK 0x00000008L +#define JPEG_CGC_GATE__JPEG4_DEC_MASK 0x00000010L +#define JPEG_CGC_GATE__JPEG5_DEC_MASK 0x00000020L +#define JPEG_CGC_GATE__JPEG6_DEC_MASK 0x00000040L +#define JPEG_CGC_GATE__JPEG7_DEC_MASK 0x00000080L +#define JPEG_CGC_GATE__JPEG_ENC_MASK 0x00000100L +#define JPEG_CGC_GATE__JMCIF_MASK 0x00000200L +#define JPEG_CGC_GATE__JRBBM_MASK 0x00000400L +//JPEG_CGC_CTRL +#define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0 +#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x1 +#define JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x5 +#define JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT 0x10 +#define JPEG_CGC_CTRL__JPEG1_DEC_MODE__SHIFT 0x11 +#define JPEG_CGC_CTRL__JPEG2_DEC_MODE__SHIFT 0x12 +#define JPEG_CGC_CTRL__JPEG3_DEC_MODE__SHIFT 0x13 +#define JPEG_CGC_CTRL__JPEG4_DEC_MODE__SHIFT 0x14 +#define JPEG_CGC_CTRL__JPEG5_DEC_MODE__SHIFT 0x15 +#define JPEG_CGC_CTRL__JPEG6_DEC_MODE__SHIFT 0x16 +#define JPEG_CGC_CTRL__JPEG7_DEC_MODE__SHIFT 0x17 +#define JPEG_CGC_CTRL__JPEG_ENC_MODE__SHIFT 0x18 +#define JPEG_CGC_CTRL__JMCIF_MODE__SHIFT 0x19 +#define JPEG_CGC_CTRL__JRBBM_MODE__SHIFT 0x1a +#define JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L +#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000001EL +#define JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK 0x00001FE0L +#define JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK 0x00010000L +#define JPEG_CGC_CTRL__JPEG1_DEC_MODE_MASK 0x00020000L +#define JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK 0x00040000L +#define JPEG_CGC_CTRL__JPEG3_DEC_MODE_MASK 0x00080000L +#define JPEG_CGC_CTRL__JPEG4_DEC_MODE_MASK 0x00100000L +#define JPEG_CGC_CTRL__JPEG5_DEC_MODE_MASK 0x00200000L +#define JPEG_CGC_CTRL__JPEG6_DEC_MODE_MASK 0x00400000L +#define JPEG_CGC_CTRL__JPEG7_DEC_MODE_MASK 0x00800000L +#define 
JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK 0x01000000L +#define JPEG_CGC_CTRL__JMCIF_MODE_MASK 0x02000000L +#define JPEG_CGC_CTRL__JRBBM_MODE_MASK 0x04000000L +//JPEG_CGC_STATUS +#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE__SHIFT 0x0 +#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE__SHIFT 0x1 +#define JPEG_CGC_STATUS__JPEG1_DEC_VCLK_ACTIVE__SHIFT 0x2 +#define JPEG_CGC_STATUS__JPEG1_DEC_SCLK_ACTIVE__SHIFT 0x3 +#define JPEG_CGC_STATUS__JPEG2_DEC_VCLK_ACTIVE__SHIFT 0x4 +#define JPEG_CGC_STATUS__JPEG2_DEC_SCLK_ACTIVE__SHIFT 0x5 +#define JPEG_CGC_STATUS__JPEG3_DEC_VCLK_ACTIVE__SHIFT 0x6 +#define JPEG_CGC_STATUS__JPEG3_DEC_SCLK_ACTIVE__SHIFT 0x7 +#define JPEG_CGC_STATUS__JPEG4_DEC_VCLK_ACTIVE__SHIFT 0x8 +#define JPEG_CGC_STATUS__JPEG4_DEC_SCLK_ACTIVE__SHIFT 0x9 +#define JPEG_CGC_STATUS__JPEG5_DEC_VCLK_ACTIVE__SHIFT 0xa +#define JPEG_CGC_STATUS__JPEG5_DEC_SCLK_ACTIVE__SHIFT 0xb +#define JPEG_CGC_STATUS__JPEG6_DEC_VCLK_ACTIVE__SHIFT 0xc +#define JPEG_CGC_STATUS__JPEG6_DEC_SCLK_ACTIVE__SHIFT 0xd +#define JPEG_CGC_STATUS__JPEG7_DEC_VCLK_ACTIVE__SHIFT 0xe +#define JPEG_CGC_STATUS__JPEG7_DEC_SCLK_ACTIVE__SHIFT 0xf +#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE__SHIFT 0x10 +#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE__SHIFT 0x11 +#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE__SHIFT 0x12 +#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE__SHIFT 0x13 +#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE__SHIFT 0x14 +#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE_MASK 0x00000001L +#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE_MASK 0x00000002L +#define JPEG_CGC_STATUS__JPEG1_DEC_VCLK_ACTIVE_MASK 0x00000004L +#define JPEG_CGC_STATUS__JPEG1_DEC_SCLK_ACTIVE_MASK 0x00000008L +#define JPEG_CGC_STATUS__JPEG2_DEC_VCLK_ACTIVE_MASK 0x00000010L +#define JPEG_CGC_STATUS__JPEG2_DEC_SCLK_ACTIVE_MASK 0x00000020L +#define JPEG_CGC_STATUS__JPEG3_DEC_VCLK_ACTIVE_MASK 0x00000040L +#define JPEG_CGC_STATUS__JPEG3_DEC_SCLK_ACTIVE_MASK 0x00000080L +#define JPEG_CGC_STATUS__JPEG4_DEC_VCLK_ACTIVE_MASK 0x00000100L +#define JPEG_CGC_STATUS__JPEG4_DEC_SCLK_ACTIVE_MASK 0x00000200L +#define JPEG_CGC_STATUS__JPEG5_DEC_VCLK_ACTIVE_MASK 0x00000400L +#define JPEG_CGC_STATUS__JPEG5_DEC_SCLK_ACTIVE_MASK 0x00000800L +#define JPEG_CGC_STATUS__JPEG6_DEC_VCLK_ACTIVE_MASK 0x00001000L +#define JPEG_CGC_STATUS__JPEG6_DEC_SCLK_ACTIVE_MASK 0x00002000L +#define JPEG_CGC_STATUS__JPEG7_DEC_VCLK_ACTIVE_MASK 0x00004000L +#define JPEG_CGC_STATUS__JPEG7_DEC_SCLK_ACTIVE_MASK 0x00008000L +#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE_MASK 0x00010000L +#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE_MASK 0x00020000L +#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE_MASK 0x00040000L +#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE_MASK 0x00080000L +#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE_MASK 0x00100000L +//JPEG_COMN_CGC_MEM_CTRL +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN__SHIFT 0x0 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN__SHIFT 0x1 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN__SHIFT 0x2 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN__SHIFT 0x3 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN_MASK 0x00000001L +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN_MASK 0x00000002L +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN_MASK 0x00000004L +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN_MASK 0x00000008L +//JPEG_DEC_CGC_MEM_CTRL +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN__SHIFT 0x0 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN__SHIFT 0x1 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN__SHIFT 0x2 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN__SHIFT 0x3 +#define 
JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_LS_EN__SHIFT 0x4 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_DS_EN__SHIFT 0x5 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_SD_EN__SHIFT 0x6 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_LS_SW_EN__SHIFT 0x7 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_EN__SHIFT 0x8 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_DS_EN__SHIFT 0x9 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_SD_EN__SHIFT 0xa +#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_SW_EN__SHIFT 0xb +#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_LS_EN__SHIFT 0xc +#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_DS_EN__SHIFT 0xd +#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_SD_EN__SHIFT 0xe +#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_LS_SW_EN__SHIFT 0xf +#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_LS_EN__SHIFT 0x10 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_DS_EN__SHIFT 0x11 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_SD_EN__SHIFT 0x12 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_LS_SW_EN__SHIFT 0x13 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_LS_EN__SHIFT 0x14 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_DS_EN__SHIFT 0x15 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_SD_EN__SHIFT 0x16 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_LS_SW_EN__SHIFT 0x17 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_LS_EN__SHIFT 0x18 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_DS_EN__SHIFT 0x19 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_SD_EN__SHIFT 0x1a +#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_LS_SW_EN__SHIFT 0x1b +#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_LS_EN__SHIFT 0x1c +#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_DS_EN__SHIFT 0x1d +#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_SD_EN__SHIFT 0x1e +#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_LS_SW_EN__SHIFT 0x1f +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN_MASK 0x00000001L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN_MASK 0x00000002L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN_MASK 0x00000004L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN_MASK 0x00000008L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_LS_EN_MASK 0x00000010L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_DS_EN_MASK 0x00000020L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_SD_EN_MASK 0x00000040L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_LS_SW_EN_MASK 0x00000080L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_EN_MASK 0x00000100L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_DS_EN_MASK 0x00000200L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_SD_EN_MASK 0x00000400L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_SW_EN_MASK 0x00000800L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_LS_EN_MASK 0x00001000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_DS_EN_MASK 0x00002000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_SD_EN_MASK 0x00004000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_LS_SW_EN_MASK 0x00008000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_LS_EN_MASK 0x00010000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_DS_EN_MASK 0x00020000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_SD_EN_MASK 0x00040000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_LS_SW_EN_MASK 0x00080000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_LS_EN_MASK 0x00100000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_DS_EN_MASK 0x00200000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_SD_EN_MASK 0x00400000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_LS_SW_EN_MASK 0x00800000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_LS_EN_MASK 0x01000000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_DS_EN_MASK 0x02000000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_SD_EN_MASK 0x04000000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_LS_SW_EN_MASK 0x08000000L +#define 
JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_LS_EN_MASK 0x10000000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_DS_EN_MASK 0x20000000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_SD_EN_MASK 0x40000000L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_LS_SW_EN_MASK 0x80000000L +//JPEG_ENC_CGC_MEM_CTRL +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN__SHIFT 0x0 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN__SHIFT 0x1 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN__SHIFT 0x2 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN__SHIFT 0x3 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN_MASK 0x00000001L +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN_MASK 0x00000002L +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN_MASK 0x00000004L +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN_MASK 0x00000008L +//JPEG_PERF_BANK_CONF +#define JPEG_PERF_BANK_CONF__RESET__SHIFT 0x0 +#define JPEG_PERF_BANK_CONF__PEEK__SHIFT 0x8 +#define JPEG_PERF_BANK_CONF__CONCATENATE__SHIFT 0x10 +#define JPEG_PERF_BANK_CONF__CORE_SEL__SHIFT 0x15 +#define JPEG_PERF_BANK_CONF__RESET_MASK 0x0000000FL +#define JPEG_PERF_BANK_CONF__PEEK_MASK 0x00000F00L +#define JPEG_PERF_BANK_CONF__CONCATENATE_MASK 0x00030000L +#define JPEG_PERF_BANK_CONF__CORE_SEL_MASK 0x00E00000L +//JPEG_PERF_BANK_EVENT_SEL +#define JPEG_PERF_BANK_EVENT_SEL__SEL0__SHIFT 0x0 +#define JPEG_PERF_BANK_EVENT_SEL__SEL1__SHIFT 0x8 +#define JPEG_PERF_BANK_EVENT_SEL__SEL2__SHIFT 0x10 +#define JPEG_PERF_BANK_EVENT_SEL__SEL3__SHIFT 0x18 +#define JPEG_PERF_BANK_EVENT_SEL__SEL0_MASK 0x000000FFL +#define JPEG_PERF_BANK_EVENT_SEL__SEL1_MASK 0x0000FF00L +#define JPEG_PERF_BANK_EVENT_SEL__SEL2_MASK 0x00FF0000L +#define JPEG_PERF_BANK_EVENT_SEL__SEL3_MASK 0xFF000000L +//JPEG_PERF_BANK_COUNT0 +#define JPEG_PERF_BANK_COUNT0__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT0__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT1 +#define JPEG_PERF_BANK_COUNT1__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT1__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT2 +#define JPEG_PERF_BANK_COUNT2__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT2__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT3 +#define JPEG_PERF_BANK_COUNT3__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT3__COUNT_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_pg_dec +//UVD_PGFSM_CONFIG +#define UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT 0x0 +#define UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT 0x2 +#define UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT 0x4 +#define UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT 0x6 +#define UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT 0x8 +#define UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT 0xa +#define UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT 0xc +#define UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT 0xe +#define UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT 0x10 +#define UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT 0x12 +#define UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT 0x14 +#define UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT 0x16 +#define UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT 0x18 +#define UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT 0x1a +#define UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT 0x1c +#define UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG_MASK 0x00000003L +#define UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG_MASK 0x0000000CL +#define UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG_MASK 0x00000030L +#define UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG_MASK 0x000000C0L +#define UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG_MASK 0x00000300L +#define UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG_MASK 0x00000C00L +#define UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG_MASK 0x00003000L +#define UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG_MASK 
0x0000C000L +#define UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG_MASK 0x00030000L +#define UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG_MASK 0x000C0000L +#define UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG_MASK 0x00300000L +#define UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG_MASK 0x00C00000L +#define UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG_MASK 0x03000000L +#define UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG_MASK 0x0C000000L +#define UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG_MASK 0x30000000L +//UVD_PGFSM_STATUS +#define UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT 0x0 +#define UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT 0x2 +#define UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT 0x4 +#define UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT 0x6 +#define UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT 0x8 +#define UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT 0xa +#define UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT 0xc +#define UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT 0xe +#define UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT 0x10 +#define UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT 0x12 +#define UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT 0x14 +#define UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT 0x16 +#define UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT 0x18 +#define UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT 0x1a +#define UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT 0x1c +#define UVD_PGFSM_STATUS__UVDM_PWR_STATUS_MASK 0x00000003L +#define UVD_PGFSM_STATUS__UVDS_PWR_STATUS_MASK 0x0000000CL +#define UVD_PGFSM_STATUS__UVDF_PWR_STATUS_MASK 0x00000030L +#define UVD_PGFSM_STATUS__UVDTC_PWR_STATUS_MASK 0x000000C0L +#define UVD_PGFSM_STATUS__UVDB_PWR_STATUS_MASK 0x00000300L +#define UVD_PGFSM_STATUS__UVDTA_PWR_STATUS_MASK 0x00000C00L +#define UVD_PGFSM_STATUS__UVDLM_PWR_STATUS_MASK 0x00003000L +#define UVD_PGFSM_STATUS__UVDTD_PWR_STATUS_MASK 0x0000C000L +#define UVD_PGFSM_STATUS__UVDTE_PWR_STATUS_MASK 0x00030000L +#define UVD_PGFSM_STATUS__UVDE_PWR_STATUS_MASK 0x000C0000L +#define UVD_PGFSM_STATUS__UVDAB_PWR_STATUS_MASK 0x00300000L +#define UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK 0x00C00000L +#define UVD_PGFSM_STATUS__UVDTB_PWR_STATUS_MASK 0x03000000L +#define UVD_PGFSM_STATUS__UVDNA_PWR_STATUS_MASK 0x0C000000L +#define UVD_PGFSM_STATUS__UVDNB_PWR_STATUS_MASK 0x30000000L +//UVD_POWER_STATUS +#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x0 +#define UVD_POWER_STATUS__UVD_PG_MODE__SHIFT 0x2 +#define UVD_POWER_STATUS__UVD_CG_MODE__SHIFT 0x4 +#define UVD_POWER_STATUS__UVD_PG_EN__SHIFT 0x8 +#define UVD_POWER_STATUS__RBC_SNOOP_DIS__SHIFT 0x9 +#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS__SHIFT 0xb +#define UVD_POWER_STATUS__STALL_DPG_POWER_UP__SHIFT 0x1f +#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000003L +#define UVD_POWER_STATUS__UVD_PG_MODE_MASK 0x00000004L +#define UVD_POWER_STATUS__UVD_CG_MODE_MASK 0x00000030L +#define UVD_POWER_STATUS__UVD_PG_EN_MASK 0x00000100L +#define UVD_POWER_STATUS__RBC_SNOOP_DIS_MASK 0x00000200L +#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS_MASK 0x00000800L +#define UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK 0x80000000L +//UVD_JPEG_POWER_STATUS +#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS__SHIFT 0x0 +#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE__SHIFT 0x4 +#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS__SHIFT 0x8 +#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS__SHIFT 0x9 +#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP__SHIFT 0x1f +#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK 0x00000001L +#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK 0x00000010L +#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS_MASK 0x00000100L +#define 
UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS_MASK 0x00000200L +#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP_MASK 0x80000000L +//UVD_MC_DJPEG_RD_SPACE +#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE__SHIFT 0x0 +#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE_MASK 0x0003FFFFL +//UVD_MC_DJPEG_WR_SPACE +#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE__SHIFT 0x0 +#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE_MASK 0x0003FFFFL +//UVD_MC_EJPEG_RD_SPACE +#define UVD_MC_EJPEG_RD_SPACE__EJPEG_RD_SPACE__SHIFT 0x0 +#define UVD_MC_EJPEG_RD_SPACE__EJPEG_RD_SPACE_MASK 0x0003FFFFL +//UVD_MC_EJPEG_WR_SPACE +#define UVD_MC_EJPEG_WR_SPACE__EJPEG_WR_SPACE__SHIFT 0x0 +#define UVD_MC_EJPEG_WR_SPACE__EJPEG_WR_SPACE_MASK 0x0003FFFFL +//UVD_PG_IND_INDEX +#define UVD_PG_IND_INDEX__INDEX__SHIFT 0x0 +#define UVD_PG_IND_INDEX__INDEX_MASK 0x0000003FL +//UVD_PG_IND_DATA +#define UVD_PG_IND_DATA__DATA__SHIFT 0x0 +#define UVD_PG_IND_DATA__DATA_MASK 0xFFFFFFFFL +//CC_UVD_HARVESTING +#define CC_UVD_HARVESTING__MMSCH_DISABLE__SHIFT 0x0 +#define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1 +#define CC_UVD_HARVESTING__MMSCH_DISABLE_MASK 0x00000001L +#define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L +//UVD_DPG_LMA_CTL +#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0 +#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1 +#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2 +#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4 +#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0x10 +#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L +#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L +#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L +#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L +#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFF0000L +//UVD_DPG_LMA_DATA +#define UVD_DPG_LMA_DATA__LMA_DATA__SHIFT 0x0 +#define UVD_DPG_LMA_DATA__LMA_DATA_MASK 0xFFFFFFFFL +//UVD_DPG_LMA_MASK +#define UVD_DPG_LMA_MASK__LMA_MASK__SHIFT 0x0 +#define UVD_DPG_LMA_MASK__LMA_MASK_MASK 0xFFFFFFFFL +//UVD_DPG_PAUSE +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0 +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1 +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2 +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3 +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L +//UVD_SCRATCH1 +#define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0 +#define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH2 +#define UVD_SCRATCH2__SCRATCH2_DATA__SHIFT 0x0 +#define UVD_SCRATCH2__SCRATCH2_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH3 +#define UVD_SCRATCH3__SCRATCH3_DATA__SHIFT 0x0 +#define UVD_SCRATCH3__SCRATCH3_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH4 +#define UVD_SCRATCH4__SCRATCH4_DATA__SHIFT 0x0 +#define UVD_SCRATCH4__SCRATCH4_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH5 +#define UVD_SCRATCH5__SCRATCH5_DATA__SHIFT 0x0 +#define UVD_SCRATCH5__SCRATCH5_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH6 +#define UVD_SCRATCH6__SCRATCH6_DATA__SHIFT 0x0 +#define UVD_SCRATCH6__SCRATCH6_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH7 +#define UVD_SCRATCH7__SCRATCH7_DATA__SHIFT 0x0 +#define UVD_SCRATCH7__SCRATCH7_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH8 +#define UVD_SCRATCH8__SCRATCH8_DATA__SHIFT 0x0 +#define UVD_SCRATCH8__SCRATCH8_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH9 +#define UVD_SCRATCH9__SCRATCH9_DATA__SHIFT 0x0 +#define UVD_SCRATCH9__SCRATCH9_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH10 +#define 
UVD_SCRATCH10__SCRATCH10_DATA__SHIFT 0x0 +#define UVD_SCRATCH10__SCRATCH10_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH11 +#define UVD_SCRATCH11__SCRATCH11_DATA__SHIFT 0x0 +#define UVD_SCRATCH11__SCRATCH11_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH12 +#define UVD_SCRATCH12__SCRATCH12_DATA__SHIFT 0x0 +#define UVD_SCRATCH12__SCRATCH12_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH13 +#define UVD_SCRATCH13__SCRATCH13_DATA__SHIFT 0x0 +#define UVD_SCRATCH13__SCRATCH13_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH14 +#define UVD_SCRATCH14__SCRATCH14_DATA__SHIFT 0x0 +#define UVD_SCRATCH14__SCRATCH14_DATA_MASK 0xFFFFFFFFL +//UVD_FREE_COUNTER_REG +#define UVD_FREE_COUNTER_REG__FREE_COUNTER__SHIFT 0x0 +#define UVD_FREE_COUNTER_REG__FREE_COUNTER_MASK 0xFFFFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_DPG_VCPU_CACHE_OFFSET0 +#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0 +#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01FFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_VMID +#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL +//UVD_REG_FILTER_EN +#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN__SHIFT 0x0 +#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV__SHIFT 0x1 +#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN__SHIFT 0x2 +#define UVD_REG_FILTER_EN__JPEG_PRIV_EN__SHIFT 0x3 +#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN_MASK 0x00000001L +#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV_MASK 0x00000002L +#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN_MASK 0x00000004L +#define UVD_REG_FILTER_EN__JPEG_PRIV_EN_MASK 0x00000008L +//UVD_SECURITY_REG_VIO_REPORT +#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO__SHIFT 0x0 +#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO__SHIFT 0x1 +#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO__SHIFT 0x2 +#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO__SHIFT 0x3 +#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO__SHIFT 0x4 +#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO__SHIFT 0x5 +#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO_MASK 0x00000001L +#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO_MASK 0x00000002L +#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO_MASK 0x00000004L +#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO_MASK 0x00000008L +#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO_MASK 0x00000010L +#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO_MASK 0x00000020L +//UVD_FW_VERSION +#define UVD_FW_VERSION__FW_VERSION__SHIFT 0x0 +#define UVD_FW_VERSION__FW_VERSION_MASK 0xFFFFFFFFL +//UVD_PF_STATUS +#define UVD_PF_STATUS__JPEG_PF_OCCURED__SHIFT 0x0 +#define UVD_PF_STATUS__NJ_PF_OCCURED__SHIFT 0x1 +#define UVD_PF_STATUS__ENCODER0_PF_OCCURED__SHIFT 0x2 +#define UVD_PF_STATUS__ENCODER1_PF_OCCURED__SHIFT 0x3 +#define UVD_PF_STATUS__ENCODER2_PF_OCCURED__SHIFT 0x4 +#define UVD_PF_STATUS__ENCODER3_PF_OCCURED__SHIFT 0x5 +#define UVD_PF_STATUS__ENCODER4_PF_OCCURED__SHIFT 0x6 +#define UVD_PF_STATUS__EJPEG_PF_OCCURED__SHIFT 0x7 +#define UVD_PF_STATUS__JPEG_PF_CLEAR__SHIFT 0x8 +#define UVD_PF_STATUS__NJ_PF_CLEAR__SHIFT 0x9 +#define UVD_PF_STATUS__ENCODER0_PF_CLEAR__SHIFT 0xa +#define UVD_PF_STATUS__ENCODER1_PF_CLEAR__SHIFT 0xb +#define UVD_PF_STATUS__ENCODER2_PF_CLEAR__SHIFT 0xc +#define 
UVD_PF_STATUS__ENCODER3_PF_CLEAR__SHIFT 0xd +#define UVD_PF_STATUS__ENCODER4_PF_CLEAR__SHIFT 0xe +#define UVD_PF_STATUS__EJPEG_PF_CLEAR__SHIFT 0xf +#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED__SHIFT 0x10 +#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED__SHIFT 0x11 +#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED__SHIFT 0x12 +#define UVD_PF_STATUS__JPEG2_PF_OCCURED__SHIFT 0x13 +#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED__SHIFT 0x14 +#define UVD_PF_STATUS__JPEG2_PF_CLEAR__SHIFT 0x15 +#define UVD_PF_STATUS__ENCODER5_PF_OCCURED__SHIFT 0x16 +#define UVD_PF_STATUS__ENCODER5_PF_CLEAR__SHIFT 0x17 +#define UVD_PF_STATUS__JPEG_PF_OCCURED_MASK 0x00000001L +#define UVD_PF_STATUS__NJ_PF_OCCURED_MASK 0x00000002L +#define UVD_PF_STATUS__ENCODER0_PF_OCCURED_MASK 0x00000004L +#define UVD_PF_STATUS__ENCODER1_PF_OCCURED_MASK 0x00000008L +#define UVD_PF_STATUS__ENCODER2_PF_OCCURED_MASK 0x00000010L +#define UVD_PF_STATUS__ENCODER3_PF_OCCURED_MASK 0x00000020L +#define UVD_PF_STATUS__ENCODER4_PF_OCCURED_MASK 0x00000040L +#define UVD_PF_STATUS__EJPEG_PF_OCCURED_MASK 0x00000080L +#define UVD_PF_STATUS__JPEG_PF_CLEAR_MASK 0x00000100L +#define UVD_PF_STATUS__NJ_PF_CLEAR_MASK 0x00000200L +#define UVD_PF_STATUS__ENCODER0_PF_CLEAR_MASK 0x00000400L +#define UVD_PF_STATUS__ENCODER1_PF_CLEAR_MASK 0x00000800L +#define UVD_PF_STATUS__ENCODER2_PF_CLEAR_MASK 0x00001000L +#define UVD_PF_STATUS__ENCODER3_PF_CLEAR_MASK 0x00002000L +#define UVD_PF_STATUS__ENCODER4_PF_CLEAR_MASK 0x00004000L +#define UVD_PF_STATUS__EJPEG_PF_CLEAR_MASK 0x00008000L +#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED_MASK 0x00010000L +#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED_MASK 0x00020000L +#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED_MASK 0x00040000L +#define UVD_PF_STATUS__JPEG2_PF_OCCURED_MASK 0x00080000L +#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED_MASK 0x00100000L +#define UVD_PF_STATUS__JPEG2_PF_CLEAR_MASK 0x00200000L +#define UVD_PF_STATUS__ENCODER5_PF_OCCURED_MASK 0x00400000L +#define UVD_PF_STATUS__ENCODER5_PF_CLEAR_MASK 0x00800000L +//UVD_DPG_CLK_EN_VCPU_REPORT +#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN__SHIFT 0x0 +#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT__SHIFT 0x1 +#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN_MASK 0x00000001L +#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT_MASK 0x000000FEL +//CC_UVD_VCPU_ERR_DETECT_BOT_LO +#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO__SHIFT 0xc +#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO_MASK 0xFFFFF000L +//CC_UVD_VCPU_ERR_DETECT_BOT_HI +#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI__SHIFT 0x0 +#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI_MASK 0x0000FFFFL +//CC_UVD_VCPU_ERR_DETECT_TOP_LO +#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO__SHIFT 0xc +#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO_MASK 0xFFFFF000L +//CC_UVD_VCPU_ERR_DETECT_TOP_HI +#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI__SHIFT 0x0 +#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI_MASK 0x0000FFFFL +//CC_UVD_VCPU_ERR +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS__SHIFT 0x0 +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR__SHIFT 0x1 +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN__SHIFT 0x2 +#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS__SHIFT 0x3 +#define CC_UVD_VCPU_ERR__RESET_ON_FAULT__SHIFT 0x4 +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS_MASK 0x00000001L +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR_MASK 0x00000002L +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN_MASK 0x00000004L +#define 
CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS_MASK 0x00000008L +#define CC_UVD_VCPU_ERR__RESET_ON_FAULT_MASK 0x00000010L +//CC_UVD_VCPU_ERR_INST_ADDR_LO +#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO__SHIFT 0x0 +#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO_MASK 0xFFFFFFFFL +//CC_UVD_VCPU_ERR_INST_ADDR_HI +#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI__SHIFT 0x0 +#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI_MASK 0x0000FFFFL +//UVD_LMI_MMSCH_NC_SPACE +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE__SHIFT 0x3 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE__SHIFT 0x6 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE__SHIFT 0x9 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE__SHIFT 0xc +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE__SHIFT 0xf +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE__SHIFT 0x12 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE__SHIFT 0x15 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE_MASK 0x00000007L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE_MASK 0x00000038L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE_MASK 0x000001C0L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE_MASK 0x00000E00L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE_MASK 0x00007000L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE_MASK 0x00038000L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE_MASK 0x001C0000L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE_MASK 0x00E00000L +//UVD_LMI_ATOMIC_SPACE +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE__SHIFT 0x0 +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE__SHIFT 0x3 +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE__SHIFT 0x6 +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE__SHIFT 0x9 +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE_MASK 0x00000007L +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE_MASK 0x00000038L +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE_MASK 0x000001C0L +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE_MASK 0x00000E00L +//UVD_GFX8_ADDR_CONFIG +#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4 +#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L +//UVD_GFX10_ADDR_CONFIG +#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6 +#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8 +#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L +#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L +#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//UVD_GPCNT2_CNTL +#define UVD_GPCNT2_CNTL__CLR__SHIFT 0x0 +#define UVD_GPCNT2_CNTL__START__SHIFT 0x1 +#define UVD_GPCNT2_CNTL__COUNTUP__SHIFT 0x2 +#define UVD_GPCNT2_CNTL__CLR_MASK 0x00000001L +#define UVD_GPCNT2_CNTL__START_MASK 0x00000002L +#define UVD_GPCNT2_CNTL__COUNTUP_MASK 0x00000004L +//UVD_GPCNT2_TARGET_LOWER +#define UVD_GPCNT2_TARGET_LOWER__TARGET__SHIFT 0x0 +#define UVD_GPCNT2_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL +//UVD_GPCNT2_STATUS_LOWER +#define UVD_GPCNT2_STATUS_LOWER__COUNT__SHIFT 0x0 +#define UVD_GPCNT2_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL 
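The UVD_GPCNT2/UVD_GPCNT3 status counters above are split into a 32-bit LOWER word and a 16-bit UPPER word, so taking a coherent 64-bit sample while the counter is running has to guard against the LOWER half wrapping between the two MMIO reads. A minimal read sketch, assuming a hypothetical reg_read() MMIO accessor and caller-supplied register offsets (the UVD_GPCNT2_STATUS_UPPER fields are defined just below this point):

#include <stdint.h>

extern uint32_t reg_read(uint32_t offset);	/* hypothetical MMIO read */

/* Sample the split 48-bit counter; retry if UPPER changed mid-read. */
static uint64_t uvd_gpcnt2_read(uint32_t upper_off, uint32_t lower_off)
{
	uint32_t hi, lo;

	do {
		hi = reg_read(upper_off) & UVD_GPCNT2_STATUS_UPPER__COUNT_MASK;
		lo = reg_read(lower_off) & UVD_GPCNT2_STATUS_LOWER__COUNT_MASK;
	} while ((reg_read(upper_off) & UVD_GPCNT2_STATUS_UPPER__COUNT_MASK) != hi);

	return ((uint64_t)hi << 32) | lo;
}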
+//UVD_GPCNT2_TARGET_UPPER +#define UVD_GPCNT2_TARGET_UPPER__TARGET__SHIFT 0x0 +#define UVD_GPCNT2_TARGET_UPPER__TARGET_MASK 0x0000FFFFL +//UVD_GPCNT2_STATUS_UPPER +#define UVD_GPCNT2_STATUS_UPPER__COUNT__SHIFT 0x0 +#define UVD_GPCNT2_STATUS_UPPER__COUNT_MASK 0x0000FFFFL +//UVD_GPCNT3_CNTL +#define UVD_GPCNT3_CNTL__CLR__SHIFT 0x0 +#define UVD_GPCNT3_CNTL__START__SHIFT 0x1 +#define UVD_GPCNT3_CNTL__COUNTUP__SHIFT 0x2 +#define UVD_GPCNT3_CNTL__FREQ__SHIFT 0x3 +#define UVD_GPCNT3_CNTL__DIV__SHIFT 0xa +#define UVD_GPCNT3_CNTL__CLR_MASK 0x00000001L +#define UVD_GPCNT3_CNTL__START_MASK 0x00000002L +#define UVD_GPCNT3_CNTL__COUNTUP_MASK 0x00000004L +#define UVD_GPCNT3_CNTL__FREQ_MASK 0x000003F8L +#define UVD_GPCNT3_CNTL__DIV_MASK 0x0001FC00L +//UVD_GPCNT3_TARGET_LOWER +#define UVD_GPCNT3_TARGET_LOWER__TARGET__SHIFT 0x0 +#define UVD_GPCNT3_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL +//UVD_GPCNT3_STATUS_LOWER +#define UVD_GPCNT3_STATUS_LOWER__COUNT__SHIFT 0x0 +#define UVD_GPCNT3_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL +//UVD_GPCNT3_TARGET_UPPER +#define UVD_GPCNT3_TARGET_UPPER__TARGET__SHIFT 0x0 +#define UVD_GPCNT3_TARGET_UPPER__TARGET_MASK 0x0000FFFFL +//UVD_GPCNT3_STATUS_UPPER +#define UVD_GPCNT3_STATUS_UPPER__COUNT__SHIFT 0x0 +#define UVD_GPCNT3_STATUS_UPPER__COUNT_MASK 0x0000FFFFL +//UVD_VCLK_DS_CNTL +#define UVD_VCLK_DS_CNTL__VCLK_DS_EN__SHIFT 0x0 +#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS__SHIFT 0x4 +#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT__SHIFT 0x10 +#define UVD_VCLK_DS_CNTL__VCLK_DS_EN_MASK 0x00000001L +#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS_MASK 0x00000010L +#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L +//UVD_DCLK_DS_CNTL +#define UVD_DCLK_DS_CNTL__DCLK_DS_EN__SHIFT 0x0 +#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS__SHIFT 0x4 +#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT__SHIFT 0x10 +#define UVD_DCLK_DS_CNTL__DCLK_DS_EN_MASK 0x00000001L +#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS_MASK 0x00000010L +#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L +//UVD_TSC_LOWER +#define UVD_TSC_LOWER__COUNT__SHIFT 0x0 +#define UVD_TSC_LOWER__COUNT_MASK 0xFFFFFFFFL +//UVD_TSC_UPPER +#define UVD_TSC_UPPER__COUNT__SHIFT 0x0 +#define UVD_TSC_UPPER__COUNT_MASK 0x00FFFFFFL +//VCN_FEATURES +#define VCN_FEATURES__HAS_VIDEO_DEC__SHIFT 0x0 +#define VCN_FEATURES__HAS_VIDEO_ENC__SHIFT 0x1 +#define VCN_FEATURES__HAS_MJPEG_DEC__SHIFT 0x2 +#define VCN_FEATURES__HAS_MJPEG_ENC__SHIFT 0x3 +#define VCN_FEATURES__HAS_VIDEO_VIRT__SHIFT 0x4 +#define VCN_FEATURES__HAS_H264_LEGACY_DEC__SHIFT 0x5 +#define VCN_FEATURES__HAS_UDEC_DEC__SHIFT 0x6 +#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC__SHIFT 0x7 +#define VCN_FEATURES__HAS_SCLR_DEC__SHIFT 0x8 +#define VCN_FEATURES__HAS_VP9_DEC__SHIFT 0x9 +#define VCN_FEATURES__HAS_AV1_DEC__SHIFT 0xa +#define VCN_FEATURES__HAS_EFC_ENC__SHIFT 0xb +#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC__SHIFT 0xc +#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC__SHIFT 0xd +#define VCN_FEATURES__HAS_AV1_ENC__SHIFT 0xe +#define VCN_FEATURES__INSTANCE_ID__SHIFT 0x1c +#define VCN_FEATURES__HAS_VIDEO_DEC_MASK 0x00000001L +#define VCN_FEATURES__HAS_VIDEO_ENC_MASK 0x00000002L +#define VCN_FEATURES__HAS_MJPEG_DEC_MASK 0x00000004L +#define VCN_FEATURES__HAS_MJPEG_ENC_MASK 0x00000008L +#define VCN_FEATURES__HAS_VIDEO_VIRT_MASK 0x00000010L +#define VCN_FEATURES__HAS_H264_LEGACY_DEC_MASK 0x00000020L +#define VCN_FEATURES__HAS_UDEC_DEC_MASK 0x00000040L +#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC_MASK 0x00000080L +#define VCN_FEATURES__HAS_SCLR_DEC_MASK 0x00000100L +#define 
VCN_FEATURES__HAS_VP9_DEC_MASK 0x00000200L +#define VCN_FEATURES__HAS_AV1_DEC_MASK 0x00000400L +#define VCN_FEATURES__HAS_EFC_ENC_MASK 0x00000800L +#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC_MASK 0x00001000L +#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC_MASK 0x00002000L +#define VCN_FEATURES__HAS_AV1_ENC_MASK 0x00004000L +#define VCN_FEATURES__INSTANCE_ID_MASK 0xF0000000L +//UVD_GPUIOV_STATUS +#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0 +#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L +//UVD_RAS_VCPU_VCODEC_STATUS +#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0 +#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f +#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL +#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L +//UVD_RAS_MMSCH_FATAL_ERROR +#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_VF__SHIFT 0x0 +#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_PF__SHIFT 0x1f +#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_VF_MASK 0x7FFFFFFFL +#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_PF_MASK 0x80000000L +//UVD_RAS_JPEG0_STATUS +#define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0 +#define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f +#define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL +#define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L +//UVD_RAS_JPEG1_STATUS +#define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0 +#define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f +#define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL +#define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L +//UVD_RAS_CNTL_PMI_ARB +#define UVD_RAS_CNTL_PMI_ARB__STAT_VCPU_VCODEC__SHIFT 0x0 +#define UVD_RAS_CNTL_PMI_ARB__ACK_VCPU_VCODEC__SHIFT 0x1 +#define UVD_RAS_CNTL_PMI_ARB__STAT_MMSCH__SHIFT 0x2 +#define UVD_RAS_CNTL_PMI_ARB__ACK_MMSCH__SHIFT 0x3 +#define UVD_RAS_CNTL_PMI_ARB__STAT_JPEG0__SHIFT 0x4 +#define UVD_RAS_CNTL_PMI_ARB__ACK_JPEG0__SHIFT 0x5 +#define UVD_RAS_CNTL_PMI_ARB__STAT_JPEG1__SHIFT 0x6 +#define UVD_RAS_CNTL_PMI_ARB__ACK_JPEG1__SHIFT 0x7 +#define UVD_RAS_CNTL_PMI_ARB__STAT_VCPU_VCODEC_MASK 0x00000001L +#define UVD_RAS_CNTL_PMI_ARB__ACK_VCPU_VCODEC_MASK 0x00000002L +#define UVD_RAS_CNTL_PMI_ARB__STAT_MMSCH_MASK 0x00000004L +#define UVD_RAS_CNTL_PMI_ARB__ACK_MMSCH_MASK 0x00000008L +#define UVD_RAS_CNTL_PMI_ARB__STAT_JPEG0_MASK 0x00000010L +#define UVD_RAS_CNTL_PMI_ARB__ACK_JPEG0_MASK 0x00000020L +#define UVD_RAS_CNTL_PMI_ARB__STAT_JPEG1_MASK 0x00000040L +#define UVD_RAS_CNTL_PMI_ARB__ACK_JPEG1_MASK 0x00000080L +//UVD_SCRATCH15 +#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0 +#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL +//VCN_JPEG_DB_CTRL1 +#define VCN_JPEG_DB_CTRL1__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL1__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL1__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL1__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL1__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL1__HIT_MASK 0x80000000L +//VCN_JPEG_DB_CTRL2 +#define VCN_JPEG_DB_CTRL2__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL2__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL2__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL2__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL2__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL2__HIT_MASK 0x80000000L +//VCN_JPEG_DB_CTRL3 +#define VCN_JPEG_DB_CTRL3__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL3__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL3__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL3__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL3__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL3__HIT_MASK 
0x80000000L +//VCN_JPEG_DB_CTRL4 +#define VCN_JPEG_DB_CTRL4__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL4__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL4__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL4__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL4__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL4__HIT_MASK 0x80000000L +//VCN_JPEG_DB_CTRL5 +#define VCN_JPEG_DB_CTRL5__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL5__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL5__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL5__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL5__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL5__HIT_MASK 0x80000000L +//VCN_JPEG_DB_CTRL6 +#define VCN_JPEG_DB_CTRL6__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL6__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL6__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL6__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL6__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL6__HIT_MASK 0x80000000L +//VCN_JPEG_DB_CTRL7 +#define VCN_JPEG_DB_CTRL7__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL7__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL7__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL7__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL7__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL7__HIT_MASK 0x80000000L +//UVD_SCRATCH32 +#define UVD_SCRATCH32__SCRATCH32_DATA__SHIFT 0x0 +#define UVD_SCRATCH32__SCRATCH32_DATA_MASK 0xFFFFFFFFL +//UVD_VERSION +#define UVD_VERSION__VARIANT_TYPE__SHIFT 0x0 +#define UVD_VERSION__MINOR_VERSION__SHIFT 0x8 +#define UVD_VERSION__MAJOR_VERSION__SHIFT 0x10 +#define UVD_VERSION__INSTANCE_ID__SHIFT 0x1c +#define UVD_VERSION__VARIANT_TYPE_MASK 0x000000FFL +#define UVD_VERSION__MINOR_VERSION_MASK 0x0000FF00L +#define UVD_VERSION__MAJOR_VERSION_MASK 0x0FFF0000L +#define UVD_VERSION__INSTANCE_ID_MASK 0xF0000000L +//VCN_RB_DB_CTRL +#define VCN_RB_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB_DB_CTRL__HIT_MASK 0x80000000L +//VCN_JPEG_DB_CTRL +#define VCN_JPEG_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB1_DB_CTRL +#define VCN_RB1_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB1_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB1_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB1_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB1_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB1_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB2_DB_CTRL +#define VCN_RB2_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB2_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB2_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB2_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB2_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB2_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB3_DB_CTRL +#define VCN_RB3_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB3_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB3_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB3_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB3_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB3_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB4_DB_CTRL +#define VCN_RB4_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB4_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB4_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB4_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB4_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB4_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB_ENABLE +#define VCN_RB_ENABLE__RB_EN__SHIFT 0x0 
+#define VCN_RB_ENABLE__JPEG_RB_EN__SHIFT 0x1 +#define VCN_RB_ENABLE__RB1_EN__SHIFT 0x2 +#define VCN_RB_ENABLE__RB2_EN__SHIFT 0x3 +#define VCN_RB_ENABLE__RB3_EN__SHIFT 0x4 +#define VCN_RB_ENABLE__RB4_EN__SHIFT 0x5 +#define VCN_RB_ENABLE__UMSCH_RB_EN__SHIFT 0x6 +#define VCN_RB_ENABLE__EJPEG_RB_EN__SHIFT 0x7 +#define VCN_RB_ENABLE__AUDIO_RB_EN__SHIFT 0x8 +#define VCN_RB_ENABLE__RB_EN_MASK 0x00000001L +#define VCN_RB_ENABLE__JPEG_RB_EN_MASK 0x00000002L +#define VCN_RB_ENABLE__RB1_EN_MASK 0x00000004L +#define VCN_RB_ENABLE__RB2_EN_MASK 0x00000008L +#define VCN_RB_ENABLE__RB3_EN_MASK 0x00000010L +#define VCN_RB_ENABLE__RB4_EN_MASK 0x00000020L +#define VCN_RB_ENABLE__UMSCH_RB_EN_MASK 0x00000040L +#define VCN_RB_ENABLE__EJPEG_RB_EN_MASK 0x00000080L +#define VCN_RB_ENABLE__AUDIO_RB_EN_MASK 0x00000100L +//VCN_RB_WPTR_CTRL +#define VCN_RB_WPTR_CTRL__RB_CS_EN__SHIFT 0x0 +#define VCN_RB_WPTR_CTRL__JPEG_CS_EN__SHIFT 0x1 +#define VCN_RB_WPTR_CTRL__RB1_CS_EN__SHIFT 0x2 +#define VCN_RB_WPTR_CTRL__RB2_CS_EN__SHIFT 0x3 +#define VCN_RB_WPTR_CTRL__RB3_CS_EN__SHIFT 0x4 +#define VCN_RB_WPTR_CTRL__RB4_CS_EN__SHIFT 0x5 +#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN__SHIFT 0x6 +#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN__SHIFT 0x7 +#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN__SHIFT 0x8 +#define VCN_RB_WPTR_CTRL__RB_CS_EN_MASK 0x00000001L +#define VCN_RB_WPTR_CTRL__JPEG_CS_EN_MASK 0x00000002L +#define VCN_RB_WPTR_CTRL__RB1_CS_EN_MASK 0x00000004L +#define VCN_RB_WPTR_CTRL__RB2_CS_EN_MASK 0x00000008L +#define VCN_RB_WPTR_CTRL__RB3_CS_EN_MASK 0x00000010L +#define VCN_RB_WPTR_CTRL__RB4_CS_EN_MASK 0x00000020L +#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN_MASK 0x00000040L +#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN_MASK 0x00000080L +#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN_MASK 0x00000100L +//UVD_RB_RPTR +#define UVD_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_RB_WPTR +#define UVD_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_RB_RPTR2 +#define UVD_RB_RPTR2__RB_RPTR__SHIFT 0x4 +#define UVD_RB_RPTR2__RB_RPTR_MASK 0x007FFFF0L +//UVD_RB_WPTR2 +#define UVD_RB_WPTR2__RB_WPTR__SHIFT 0x4 +#define UVD_RB_WPTR2__RB_WPTR_MASK 0x007FFFF0L +//UVD_RB_RPTR3 +#define UVD_RB_RPTR3__RB_RPTR__SHIFT 0x4 +#define UVD_RB_RPTR3__RB_RPTR_MASK 0x007FFFF0L +//UVD_RB_WPTR3 +#define UVD_RB_WPTR3__RB_WPTR__SHIFT 0x4 +#define UVD_RB_WPTR3__RB_WPTR_MASK 0x007FFFF0L +//UVD_RB_RPTR4 +#define UVD_RB_RPTR4__RB_RPTR__SHIFT 0x4 +#define UVD_RB_RPTR4__RB_RPTR_MASK 0x007FFFF0L +//UVD_RB_WPTR4 +#define UVD_RB_WPTR4__RB_WPTR__SHIFT 0x4 +#define UVD_RB_WPTR4__RB_WPTR_MASK 0x007FFFF0L +//UVD_OUT_RB_RPTR +#define UVD_OUT_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_OUT_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_OUT_RB_WPTR +#define UVD_OUT_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_OUT_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_AUDIO_RB_RPTR +#define UVD_AUDIO_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_AUDIO_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_AUDIO_RB_WPTR +#define UVD_AUDIO_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_AUDIO_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_RBC_RB_RPTR +#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_RBC_RB_WPTR +#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_DPG_LMA_CTL2 +#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL__SHIFT 0x0 +#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN__SHIFT 0x1 +#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR__SHIFT 0x2 +#define 
UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR__SHIFT 0x9 +#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL_MASK 0x00000001L +#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN_MASK 0x00000002L +#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR_MASK 0x000001FCL +#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L + + +// addressBlock: aid_uvd0_mmsch_dec +//MMSCH_UCODE_ADDR +#define MMSCH_UCODE_ADDR__UCODE_ADDR__SHIFT 0x2 +#define MMSCH_UCODE_ADDR__UCODE_LOCK__SHIFT 0x1f +#define MMSCH_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFCL +#define MMSCH_UCODE_ADDR__UCODE_LOCK_MASK 0x80000000L +//MMSCH_UCODE_DATA +#define MMSCH_UCODE_DATA__UCODE_DATA__SHIFT 0x0 +#define MMSCH_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL +//MMSCH_SRAM_ADDR +#define MMSCH_SRAM_ADDR__SRAM_ADDR__SHIFT 0x2 +#define MMSCH_SRAM_ADDR__SRAM_LOCK__SHIFT 0x1f +#define MMSCH_SRAM_ADDR__SRAM_ADDR_MASK 0x00001FFCL +#define MMSCH_SRAM_ADDR__SRAM_LOCK_MASK 0x80000000L +//MMSCH_SRAM_DATA +#define MMSCH_SRAM_DATA__SRAM_DATA__SHIFT 0x0 +#define MMSCH_SRAM_DATA__SRAM_DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_SRAM_OFFSET +#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_OFFSET__SHIFT 0x2 +#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_NUM_DW_PER_VF__SHIFT 0x10 +#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_OFFSET_MASK 0x00001FFCL +#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_NUM_DW_PER_VF_MASK 0x00FF0000L +//MMSCH_DB_SRAM_OFFSET +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_OFFSET__SHIFT 0x2 +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_ENG__SHIFT 0x10 +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_RING_PER_ENG__SHIFT 0x18 +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_OFFSET_MASK 0x00001FFCL +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_ENG_MASK 0x00FF0000L +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_RING_PER_ENG_MASK 0xFF000000L +//MMSCH_CTX_SRAM_OFFSET +#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_OFFSET__SHIFT 0x2 +#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_SIZE__SHIFT 0x10 +#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_OFFSET_MASK 0x00001FFCL +#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_SIZE_MASK 0xFFFF0000L +//MMSCH_CTL +#define MMSCH_CTL__P_RUNSTALL__SHIFT 0x0 +#define MMSCH_CTL__P_RESET__SHIFT 0x1 +#define MMSCH_CTL__VFID_FIFO_EN__SHIFT 0x4 +#define MMSCH_CTL__P_LOCK__SHIFT 0x1f +#define MMSCH_CTL__P_RUNSTALL_MASK 0x00000001L +#define MMSCH_CTL__P_RESET_MASK 0x00000002L +#define MMSCH_CTL__VFID_FIFO_EN_MASK 0x00000010L +#define MMSCH_CTL__P_LOCK_MASK 0x80000000L +//MMSCH_INTR +#define MMSCH_INTR__INTR__SHIFT 0x0 +#define MMSCH_INTR__INTR_MASK 0x00001FFFL +//MMSCH_INTR_ACK +#define MMSCH_INTR_ACK__INTR__SHIFT 0x0 +#define MMSCH_INTR_ACK__INTR_MASK 0x00001FFFL +//MMSCH_INTR_STATUS +#define MMSCH_INTR_STATUS__INTR__SHIFT 0x0 +#define MMSCH_INTR_STATUS__INTR_MASK 0x00001FFFL +//MMSCH_VF_VMID +#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0 +#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5 +#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL +#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L +//MMSCH_VF_CTX_ADDR_LO +#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6 +#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L +//MMSCH_VF_CTX_ADDR_HI +#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0 +#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL +//MMSCH_VF_CTX_SIZE +#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0 +#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL +//MMSCH_VF_GPCOM_ADDR_LO +#define MMSCH_VF_GPCOM_ADDR_LO__VF_GPCOM_ADDR_LO__SHIFT 0x6 +#define MMSCH_VF_GPCOM_ADDR_LO__VF_GPCOM_ADDR_LO_MASK 0xFFFFFFC0L +//MMSCH_VF_GPCOM_ADDR_HI +#define MMSCH_VF_GPCOM_ADDR_HI__VF_GPCOM_ADDR_HI__SHIFT 0x0 +#define 
MMSCH_VF_GPCOM_ADDR_HI__VF_GPCOM_ADDR_HI_MASK 0xFFFFFFFFL +//MMSCH_VF_GPCOM_SIZE +#define MMSCH_VF_GPCOM_SIZE__VF_GPCOM_SIZE__SHIFT 0x0 +#define MMSCH_VF_GPCOM_SIZE__VF_GPCOM_SIZE_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_HOST +#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_RESP +#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_0 +#define MMSCH_VF_MAILBOX_0__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_0__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_0_RESP +#define MMSCH_VF_MAILBOX_0_RESP__RESP__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_0_RESP__RESP_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_1 +#define MMSCH_VF_MAILBOX_1__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_1__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_1_RESP +#define MMSCH_VF_MAILBOX_1_RESP__RESP__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_1_RESP__RESP_MASK 0xFFFFFFFFL +//MMSCH_CNTL +#define MMSCH_CNTL__CLK_EN__SHIFT 0x0 +#define MMSCH_CNTL__ED_ENABLE__SHIFT 0x1 +#define MMSCH_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x2 +#define MMSCH_CNTL__AXI_40BIT_PIF_ADDR_FIX_EN__SHIFT 0x3 +#define MMSCH_CNTL__PDEBUG_ENABLE__SHIFT 0x4 +#define MMSCH_CNTL__MMSCH_IRQ_ERR__SHIFT 0x5 +#define MMSCH_CNTL__MMSCH_NACK_INTR_EN__SHIFT 0x9 +#define MMSCH_CNTL__MMSCH_DB_BUSY_INTR_EN__SHIFT 0xa +#define MMSCH_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14 +#define MMSCH_CNTL__TIMEOUT_DIS__SHIFT 0x1c +#define MMSCH_CNTL__MMSCH_IDLE__SHIFT 0x1d +#define MMSCH_CNTL__CLK_EN_MASK 0x00000001L +#define MMSCH_CNTL__ED_ENABLE_MASK 0x00000002L +#define MMSCH_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000004L +#define MMSCH_CNTL__AXI_40BIT_PIF_ADDR_FIX_EN_MASK 0x00000008L +#define MMSCH_CNTL__PDEBUG_ENABLE_MASK 0x00000010L +#define MMSCH_CNTL__MMSCH_IRQ_ERR_MASK 0x000001E0L +#define MMSCH_CNTL__MMSCH_NACK_INTR_EN_MASK 0x00000200L +#define MMSCH_CNTL__MMSCH_DB_BUSY_INTR_EN_MASK 0x00000400L +#define MMSCH_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L +#define MMSCH_CNTL__TIMEOUT_DIS_MASK 0x10000000L +#define MMSCH_CNTL__MMSCH_IDLE_MASK 0x20000000L +//MMSCH_NONCACHE_OFFSET0 +#define MMSCH_NONCACHE_OFFSET0__OFFSET__SHIFT 0x0 +#define MMSCH_NONCACHE_OFFSET0__OFFSET_MASK 0x0FFFFFFFL +//MMSCH_NONCACHE_SIZE0 +#define MMSCH_NONCACHE_SIZE0__SIZE__SHIFT 0x0 +#define MMSCH_NONCACHE_SIZE0__SIZE_MASK 0x00FFFFFFL +//MMSCH_NONCACHE_OFFSET1 +#define MMSCH_NONCACHE_OFFSET1__OFFSET__SHIFT 0x0 +#define MMSCH_NONCACHE_OFFSET1__OFFSET_MASK 0x0FFFFFFFL +//MMSCH_NONCACHE_SIZE1 +#define MMSCH_NONCACHE_SIZE1__SIZE__SHIFT 0x0 +#define MMSCH_NONCACHE_SIZE1__SIZE_MASK 0x00FFFFFFL +//MMSCH_PROC_STATE1 +#define MMSCH_PROC_STATE1__PC__SHIFT 0x0 +#define MMSCH_PROC_STATE1__PC_MASK 0xFFFFFFFFL +//MMSCH_LAST_MC_ADDR +#define MMSCH_LAST_MC_ADDR__MC_ADDR__SHIFT 0x0 +#define MMSCH_LAST_MC_ADDR__RW__SHIFT 0x1f +#define MMSCH_LAST_MC_ADDR__MC_ADDR_MASK 0x0FFFFFFFL +#define MMSCH_LAST_MC_ADDR__RW_MASK 0x80000000L +//MMSCH_LAST_MEM_ACCESS_HI +#define MMSCH_LAST_MEM_ACCESS_HI__PROC_CMD__SHIFT 0x0 +#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_RPTR__SHIFT 0x8 +#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_WPTR__SHIFT 0xc +#define MMSCH_LAST_MEM_ACCESS_HI__PROC_CMD_MASK 0x00000007L +#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_RPTR_MASK 0x00000700L +#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_WPTR_MASK 0x00007000L +//MMSCH_LAST_MEM_ACCESS_LO +#define MMSCH_LAST_MEM_ACCESS_LO__PROC_ADDR__SHIFT 0x0 +#define MMSCH_LAST_MEM_ACCESS_LO__PROC_ADDR_MASK 0xFFFFFFFFL +//MMSCH_IOV_ACTIVE_FCN_ID +#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID__SHIFT 0x0 +#define 
MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF__SHIFT 0x1f +#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID_MASK 0x0000001FL +#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF_MASK 0x80000000L +//MMSCH_SCRATCH_0 +#define MMSCH_SCRATCH_0__SCRATCH_0__SHIFT 0x0 +#define MMSCH_SCRATCH_0__SCRATCH_0_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_1 +#define MMSCH_SCRATCH_1__SCRATCH_1__SHIFT 0x0 +#define MMSCH_SCRATCH_1__SCRATCH_1_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_SCH_BLOCK_0 +#define MMSCH_GPUIOV_SCH_BLOCK_0__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_SCH_BLOCK_0__VERSION__SHIFT 0x4 +#define MMSCH_GPUIOV_SCH_BLOCK_0__SIZE__SHIFT 0x8 +#define MMSCH_GPUIOV_SCH_BLOCK_0__ID_MASK 0x0000000FL +#define MMSCH_GPUIOV_SCH_BLOCK_0__VERSION_MASK 0x000000F0L +#define MMSCH_GPUIOV_SCH_BLOCK_0__SIZE_MASK 0x0000FF00L +//MMSCH_GPUIOV_CMD_CONTROL_0 +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_TYPE__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE__SHIFT 0x4 +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_INTR_EN__SHIFT 0x5 +#define MMSCH_GPUIOV_CMD_CONTROL_0__VM_BUSY_INTR_EN__SHIFT 0x6 +#define MMSCH_GPUIOV_CMD_CONTROL_0__FUNCTINO_ID__SHIFT 0x8 +#define MMSCH_GPUIOV_CMD_CONTROL_0__NEXT_FUNCTINO_ID__SHIFT 0x10 +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_TYPE_MASK 0x0000000FL +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_MASK 0x00000010L +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_INTR_EN_MASK 0x00000020L +#define MMSCH_GPUIOV_CMD_CONTROL_0__VM_BUSY_INTR_EN_MASK 0x00000040L +#define MMSCH_GPUIOV_CMD_CONTROL_0__FUNCTINO_ID_MASK 0x0000FF00L +#define MMSCH_GPUIOV_CMD_CONTROL_0__NEXT_FUNCTINO_ID_MASK 0x00FF0000L +//MMSCH_GPUIOV_CMD_STATUS_0 +#define MMSCH_GPUIOV_CMD_STATUS_0__CMD_STATUS__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_STATUS_0__CMD_STATUS_MASK 0x0000000FL +//MMSCH_GPUIOV_VM_BUSY_STATUS_0 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_0__BUSY__SHIFT 0x0 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_0__BUSY_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCNS_0 +#define MMSCH_GPUIOV_ACTIVE_FCNS_0__ACTIVE_FCNS__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCNS_0__ACTIVE_FCNS_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCN_ID_0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_STATUS__SHIFT 0x8 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_MASK 0x000000FFL +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_STATUS_MASK 0x00000F00L +//MMSCH_GPUIOV_DW6_0 +#define MMSCH_GPUIOV_DW6_0__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW6_0__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW7_0 +#define MMSCH_GPUIOV_DW7_0__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW7_0__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW8_0 +#define MMSCH_GPUIOV_DW8_0__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW8_0__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_SCH_BLOCK_1 +#define MMSCH_GPUIOV_SCH_BLOCK_1__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_SCH_BLOCK_1__VERSION__SHIFT 0x4 +#define MMSCH_GPUIOV_SCH_BLOCK_1__SIZE__SHIFT 0x8 +#define MMSCH_GPUIOV_SCH_BLOCK_1__ID_MASK 0x0000000FL +#define MMSCH_GPUIOV_SCH_BLOCK_1__VERSION_MASK 0x000000F0L +#define MMSCH_GPUIOV_SCH_BLOCK_1__SIZE_MASK 0x0000FF00L +//MMSCH_GPUIOV_CMD_CONTROL_1 +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_TYPE__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE__SHIFT 0x4 +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_INTR_EN__SHIFT 0x5 +#define MMSCH_GPUIOV_CMD_CONTROL_1__VM_BUSY_INTR_EN__SHIFT 0x6 +#define MMSCH_GPUIOV_CMD_CONTROL_1__FUNCTINO_ID__SHIFT 0x8 +#define MMSCH_GPUIOV_CMD_CONTROL_1__NEXT_FUNCTINO_ID__SHIFT 0x10 +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_TYPE_MASK 0x0000000FL +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_MASK 0x00000010L 
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L +#define MMSCH_GPUIOV_CMD_CONTROL_1__VM_BUSY_INTR_EN_MASK 0x00000040L +#define MMSCH_GPUIOV_CMD_CONTROL_1__FUNCTINO_ID_MASK 0x0000FF00L +#define MMSCH_GPUIOV_CMD_CONTROL_1__NEXT_FUNCTINO_ID_MASK 0x00FF0000L +//MMSCH_GPUIOV_CMD_STATUS_1 +#define MMSCH_GPUIOV_CMD_STATUS_1__CMD_STATUS__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_STATUS_1__CMD_STATUS_MASK 0x0000000FL +//MMSCH_GPUIOV_VM_BUSY_STATUS_1 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_1__BUSY__SHIFT 0x0 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_1__BUSY_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCNS_1 +#define MMSCH_GPUIOV_ACTIVE_FCNS_1__ACTIVE_FCNS__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCNS_1__ACTIVE_FCNS_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCN_ID_1 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_STATUS__SHIFT 0x8 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_MASK 0x000000FFL +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_STATUS_MASK 0x00000F00L +//MMSCH_GPUIOV_DW6_1 +#define MMSCH_GPUIOV_DW6_1__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW6_1__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW7_1 +#define MMSCH_GPUIOV_DW7_1__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW7_1__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW8_1 +#define MMSCH_GPUIOV_DW8_1__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW8_1__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_CNTXT +#define MMSCH_GPUIOV_CNTXT__CNTXT_SIZE__SHIFT 0x0 +#define MMSCH_GPUIOV_CNTXT__CNTXT_LOCATION__SHIFT 0x7 +#define MMSCH_GPUIOV_CNTXT__CNTXT_OFFSET__SHIFT 0xa +#define MMSCH_GPUIOV_CNTXT__CNTXT_SIZE_MASK 0x0000007FL +#define MMSCH_GPUIOV_CNTXT__CNTXT_LOCATION_MASK 0x00000080L +#define MMSCH_GPUIOV_CNTXT__CNTXT_OFFSET_MASK 0xFFFFFC00L +//MMSCH_SCRATCH_2 +#define MMSCH_SCRATCH_2__SCRATCH_2__SHIFT 0x0 +#define MMSCH_SCRATCH_2__SCRATCH_2_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_3 +#define MMSCH_SCRATCH_3__SCRATCH_3__SHIFT 0x0 +#define MMSCH_SCRATCH_3__SCRATCH_3_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_4 +#define MMSCH_SCRATCH_4__SCRATCH_4__SHIFT 0x0 +#define MMSCH_SCRATCH_4__SCRATCH_4_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_5 +#define MMSCH_SCRATCH_5__SCRATCH_5__SHIFT 0x0 +#define MMSCH_SCRATCH_5__SCRATCH_5_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_6 +#define MMSCH_SCRATCH_6__SCRATCH_6__SHIFT 0x0 +#define MMSCH_SCRATCH_6__SCRATCH_6_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_7 +#define MMSCH_SCRATCH_7__SCRATCH_7__SHIFT 0x0 +#define MMSCH_SCRATCH_7__SCRATCH_7_MASK 0xFFFFFFFFL +//MMSCH_VFID_FIFO_HEAD_0 +#define MMSCH_VFID_FIFO_HEAD_0__HEAD__SHIFT 0x0 +#define MMSCH_VFID_FIFO_HEAD_0__HEAD_MASK 0x0000003FL +//MMSCH_VFID_FIFO_TAIL_0 +#define MMSCH_VFID_FIFO_TAIL_0__TAIL__SHIFT 0x0 +#define MMSCH_VFID_FIFO_TAIL_0__TAIL_MASK 0x0000003FL +//MMSCH_VFID_FIFO_HEAD_1 +#define MMSCH_VFID_FIFO_HEAD_1__HEAD__SHIFT 0x0 +#define MMSCH_VFID_FIFO_HEAD_1__HEAD_MASK 0x0000003FL +//MMSCH_VFID_FIFO_TAIL_1 +#define MMSCH_VFID_FIFO_TAIL_1__TAIL__SHIFT 0x0 +#define MMSCH_VFID_FIFO_TAIL_1__TAIL_MASK 0x0000003FL +//MMSCH_NACK_STATUS +#define MMSCH_NACK_STATUS__WR_NACK_STATUS__SHIFT 0x0 +#define MMSCH_NACK_STATUS__RD_NACK_STATUS__SHIFT 0x2 +#define MMSCH_NACK_STATUS__WR_NACK_STATUS_MASK 0x00000003L +#define MMSCH_NACK_STATUS__RD_NACK_STATUS_MASK 0x0000000CL +//MMSCH_VF_MAILBOX0_DATA +#define MMSCH_VF_MAILBOX0_DATA__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX0_DATA__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX1_DATA +#define MMSCH_VF_MAILBOX1_DATA__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX1_DATA__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_SCH_BLOCK_IP_0 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__ID__SHIFT 0x0 
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__VERSION__SHIFT 0x4 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__SIZE__SHIFT 0x8 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__ID_MASK 0x0000000FL +#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__VERSION_MASK 0x000000F0L +#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__SIZE_MASK 0x0000FF00L +//MMSCH_GPUIOV_CMD_STATUS_IP_0 +#define MMSCH_GPUIOV_CMD_STATUS_IP_0__CMD_STATUS__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_STATUS_IP_0__CMD_STATUS_MASK 0x0000000FL +//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_STATUS__SHIFT 0x8 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_MASK 0x000000FFL +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_STATUS_MASK 0x00000F00L +//MMSCH_GPUIOV_SCH_BLOCK_IP_1 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__VERSION__SHIFT 0x4 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__SIZE__SHIFT 0x8 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__ID_MASK 0x0000000FL +#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__VERSION_MASK 0x000000F0L +#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__SIZE_MASK 0x0000FF00L +//MMSCH_GPUIOV_CMD_STATUS_IP_1 +#define MMSCH_GPUIOV_CMD_STATUS_IP_1__CMD_STATUS__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_STATUS_IP_1__CMD_STATUS_MASK 0x0000000FL +//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_STATUS__SHIFT 0x8 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_MASK 0x000000FFL +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_STATUS_MASK 0x00000F00L +//MMSCH_GPUIOV_CNTXT_IP +#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_SIZE__SHIFT 0x0 +#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_LOCATION__SHIFT 0x7 +#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_SIZE_MASK 0x0000007FL +#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_LOCATION_MASK 0x00000080L +//MMSCH_GPUIOV_SCH_BLOCK_2 +#define MMSCH_GPUIOV_SCH_BLOCK_2__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_SCH_BLOCK_2__VERSION__SHIFT 0x4 +#define MMSCH_GPUIOV_SCH_BLOCK_2__SIZE__SHIFT 0x8 +#define MMSCH_GPUIOV_SCH_BLOCK_2__ID_MASK 0x0000000FL +#define MMSCH_GPUIOV_SCH_BLOCK_2__VERSION_MASK 0x000000F0L +#define MMSCH_GPUIOV_SCH_BLOCK_2__SIZE_MASK 0x0000FF00L +//MMSCH_GPUIOV_CMD_CONTROL_2 +#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_TYPE__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE__SHIFT 0x4 +#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_INTR_EN__SHIFT 0x5 +#define MMSCH_GPUIOV_CMD_CONTROL_2__VM_BUSY_INTR_EN__SHIFT 0x6 +#define MMSCH_GPUIOV_CMD_CONTROL_2__FUNCTINO_ID__SHIFT 0x8 +#define MMSCH_GPUIOV_CMD_CONTROL_2__NEXT_FUNCTINO_ID__SHIFT 0x10 +#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_TYPE_MASK 0x0000000FL +#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_MASK 0x00000010L +#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_INTR_EN_MASK 0x00000020L +#define MMSCH_GPUIOV_CMD_CONTROL_2__VM_BUSY_INTR_EN_MASK 0x00000040L +#define MMSCH_GPUIOV_CMD_CONTROL_2__FUNCTINO_ID_MASK 0x0000FF00L +#define MMSCH_GPUIOV_CMD_CONTROL_2__NEXT_FUNCTINO_ID_MASK 0x00FF0000L +//MMSCH_GPUIOV_CMD_STATUS_2 +#define MMSCH_GPUIOV_CMD_STATUS_2__CMD_STATUS__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_STATUS_2__CMD_STATUS_MASK 0x0000000FL +//MMSCH_GPUIOV_VM_BUSY_STATUS_2 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_2__BUSY__SHIFT 0x0 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_2__BUSY_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCNS_2 +#define MMSCH_GPUIOV_ACTIVE_FCNS_2__ACTIVE_FCNS__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCNS_2__ACTIVE_FCNS_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCN_ID_2 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID__SHIFT 0x0 +#define 
MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_STATUS__SHIFT 0x8 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_MASK 0x000000FFL +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_STATUS_MASK 0x00000F00L +//MMSCH_GPUIOV_DW6_2 +#define MMSCH_GPUIOV_DW6_2__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW6_2__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW7_2 +#define MMSCH_GPUIOV_DW7_2__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW7_2__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW8_2 +#define MMSCH_GPUIOV_DW8_2__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW8_2__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_SCH_BLOCK_IP_2 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__VERSION__SHIFT 0x4 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__SIZE__SHIFT 0x8 +#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__ID_MASK 0x0000000FL +#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__VERSION_MASK 0x000000F0L +#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__SIZE_MASK 0x0000FF00L +//MMSCH_GPUIOV_CMD_STATUS_IP_2 +#define MMSCH_GPUIOV_CMD_STATUS_IP_2__CMD_STATUS__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_STATUS_IP_2__CMD_STATUS_MASK 0x0000000FL +//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_STATUS__SHIFT 0x8 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_MASK 0x000000FFL +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_STATUS_MASK 0x00000F00L +//MMSCH_VFID_FIFO_HEAD_2 +#define MMSCH_VFID_FIFO_HEAD_2__HEAD__SHIFT 0x0 +#define MMSCH_VFID_FIFO_HEAD_2__HEAD_MASK 0x0000003FL +//MMSCH_VFID_FIFO_TAIL_2 +#define MMSCH_VFID_FIFO_TAIL_2__TAIL__SHIFT 0x0 +#define MMSCH_VFID_FIFO_TAIL_2__TAIL_MASK 0x0000003FL +//MMSCH_VM_BUSY_STATUS_0 +#define MMSCH_VM_BUSY_STATUS_0__BUSY__SHIFT 0x0 +#define MMSCH_VM_BUSY_STATUS_0__BUSY_MASK 0xFFFFFFFFL +//MMSCH_VM_BUSY_STATUS_1 +#define MMSCH_VM_BUSY_STATUS_1__BUSY__SHIFT 0x0 +#define MMSCH_VM_BUSY_STATUS_1__BUSY_MASK 0xFFFFFFFFL +//MMSCH_VM_BUSY_STATUS_2 +#define MMSCH_VM_BUSY_STATUS_2__BUSY__SHIFT 0x0 +#define MMSCH_VM_BUSY_STATUS_2__BUSY_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_slmi_adpdec +//UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0_MASK 
0xFFFFFFFFL +//UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC_VMID +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID__SHIFT 0x4 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID__SHIFT 0x8 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID__SHIFT 0xc +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID__SHIFT 0x10 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID__SHIFT 0x14 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID__SHIFT 0x18 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID__SHIFT 0x1c +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID_MASK 0x0000000FL +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID_MASK 0x000000F0L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID_MASK 0x00000F00L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID_MASK 0x0000F000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID_MASK 0x000F0000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID_MASK 0x00F00000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID_MASK 0x0F000000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID_MASK 0xF0000000L +//UVD_LMI_MMSCH_CTRL +#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN__SHIFT 0x0 +#define UVD_LMI_MMSCH_CTRL__MMSCH_VM__SHIFT 0x1 +#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH__SHIFT 0x2 +#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP__SHIFT 0x3 +#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP__SHIFT 0x5 +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD__SHIFT 0x7 +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR__SHIFT 0x9 +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP__SHIFT 0xb +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP__SHIFT 0xc +#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN_MASK 0x00000001L +#define UVD_LMI_MMSCH_CTRL__MMSCH_VM_MASK 0x00000002L +#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH_MASK 0x00000004L +#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP_MASK 0x00000018L +#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP_MASK 0x00000060L +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_MASK 0x00000180L +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_MASK 0x00000600L +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP_MASK 0x00000800L +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP_MASK 0x00001000L +//UVD_MMSCH_LMI_STATUS +#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT__SHIFT 0x0 +#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0x1 +#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN__SHIFT 0x2 +#define 
UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN__SHIFT 0x4 +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS__SHIFT 0x8 +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE__SHIFT 0xc +#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN__SHIFT 0xd +#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN__SHIFT 0xe +#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT_MASK 0x00000001L +#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00000002L +#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN_MASK 0x00000004L +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN_MASK 0x000000F0L +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS_MASK 0x00000700L +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE_MASK 0x00001000L +#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN_MASK 0x00002000L +#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN_MASK 0x00004000L +//VCN_RAS_CNTL_MMSCH +#define VCN_RAS_CNTL_MMSCH__MMSCH_FATAL_ERROR_EN__SHIFT 0x1 +#define VCN_RAS_CNTL_MMSCH__MMSCH_PMI_EN__SHIFT 0x5 +#define VCN_RAS_CNTL_MMSCH__MMSCH_REARM__SHIFT 0x9 +#define VCN_RAS_CNTL_MMSCH__MMSCH_READY__SHIFT 0x11 +#define VCN_RAS_CNTL_MMSCH__MMSCH_FATAL_ERROR_EN_MASK 0x00000002L +#define VCN_RAS_CNTL_MMSCH__MMSCH_PMI_EN_MASK 0x00000020L +#define VCN_RAS_CNTL_MMSCH__MMSCH_REARM_MASK 0x00000200L +#define VCN_RAS_CNTL_MMSCH__MMSCH_READY_MASK 0x00020000L + + +// addressBlock: aid_uvd0_uvd_jrbc1_uvd_jrbc_dec +//UVD_JRBC1_UVD_JRBC_RB_WPTR +#define UVD_JRBC1_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC1_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC1_UVD_JRBC_RB_CNTL +#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L +#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC1_UVD_JRBC_IB_SIZE +#define UVD_JRBC1_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC1_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC1_UVD_JRBC_URGENT_CNTL +#define UVD_JRBC1_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC1_UVD_JRBC_RB_REF_DATA +#define UVD_JRBC1_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER +#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC1_UVD_JRBC_SOFT_RESET +#define UVD_JRBC1_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC1_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC1_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC1_UVD_JRBC_STATUS +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define 
UVD_JRBC1_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC1_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC1_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC1_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC1_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC1_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC1_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC1_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L +#define UVD_JRBC1_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC1_UVD_JRBC_RB_RPTR +#define UVD_JRBC1_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC1_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS +#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS +#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE +#define UVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER +#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define 
UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC1_UVD_JRBC_IB_REF_DATA +#define UVD_JRBC1_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC1_UVD_JPEG_PREEMPT_CMD +#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0 +#define UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1 +#define UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC1_UVD_JRBC_RB_SIZE +#define UVD_JRBC1_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC1_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC1_UVD_JRBC_SCRATCH0 +#define UVD_JRBC1_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC1_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_jrbc2_uvd_jrbc_dec +//UVD_JRBC2_UVD_JRBC_RB_WPTR +#define UVD_JRBC2_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC2_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC2_UVD_JRBC_RB_CNTL +#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L +#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC2_UVD_JRBC_IB_SIZE +#define UVD_JRBC2_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC2_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC2_UVD_JRBC_URGENT_CNTL +#define UVD_JRBC2_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC2_UVD_JRBC_RB_REF_DATA +#define UVD_JRBC2_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER +#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC2_UVD_JRBC_SOFT_RESET +#define UVD_JRBC2_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0 +#define 
UVD_JRBC2_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC2_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC2_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC2_UVD_JRBC_STATUS +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC2_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC2_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC2_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC2_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC2_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC2_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC2_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L +#define UVD_JRBC2_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC2_UVD_JRBC_RB_RPTR +#define UVD_JRBC2_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC2_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS +#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS +#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE +#define UVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER +#define 
UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC2_UVD_JRBC_IB_REF_DATA +#define UVD_JRBC2_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC2_UVD_JPEG_PREEMPT_CMD +#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0 +#define UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1 +#define UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC2_UVD_JRBC_RB_SIZE +#define UVD_JRBC2_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC2_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC2_UVD_JRBC_SCRATCH0 +#define UVD_JRBC2_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC2_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_jrbc3_uvd_jrbc_dec +//UVD_JRBC3_UVD_JRBC_RB_WPTR +#define UVD_JRBC3_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC3_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC3_UVD_JRBC_RB_CNTL +#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L +#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC3_UVD_JRBC_IB_SIZE +#define UVD_JRBC3_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC3_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC3_UVD_JRBC_URGENT_CNTL +#define UVD_JRBC3_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC3_UVD_JRBC_RB_REF_DATA +#define UVD_JRBC3_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER +#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define 
UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC3_UVD_JRBC_SOFT_RESET +#define UVD_JRBC3_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC3_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC3_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC3_UVD_JRBC_STATUS +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC3_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC3_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC3_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC3_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC3_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC3_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC3_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L +#define UVD_JRBC3_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC3_UVD_JRBC_RB_RPTR +#define UVD_JRBC3_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC3_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS +#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS +#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L +#define 
UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE +#define UVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER +#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC3_UVD_JRBC_IB_REF_DATA +#define UVD_JRBC3_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC3_UVD_JPEG_PREEMPT_CMD +#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0 +#define UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1 +#define UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC3_UVD_JRBC_RB_SIZE +#define UVD_JRBC3_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC3_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC3_UVD_JRBC_SCRATCH0 +#define UVD_JRBC3_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC3_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_jrbc4_uvd_jrbc_dec +//UVD_JRBC4_UVD_JRBC_RB_WPTR +#define UVD_JRBC4_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC4_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC4_UVD_JRBC_RB_CNTL +#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L +#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC4_UVD_JRBC_IB_SIZE +#define UVD_JRBC4_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC4_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC4_UVD_JRBC_URGENT_CNTL +#define UVD_JRBC4_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC4_UVD_JRBC_RB_REF_DATA +#define UVD_JRBC4_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER +#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define 
UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC4_UVD_JRBC_SOFT_RESET +#define UVD_JRBC4_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC4_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC4_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC4_UVD_JRBC_STATUS +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC4_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC4_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC4_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC4_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC4_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC4_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC4_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L +#define UVD_JRBC4_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC4_UVD_JRBC_RB_RPTR +#define UVD_JRBC4_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC4_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS +#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS +#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 
+#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE +#define UVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER +#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC4_UVD_JRBC_IB_REF_DATA +#define UVD_JRBC4_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC4_UVD_JPEG_PREEMPT_CMD +#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0 +#define UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1 +#define UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC4_UVD_JRBC_RB_SIZE +#define UVD_JRBC4_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC4_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC4_UVD_JRBC_SCRATCH0 +#define UVD_JRBC4_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC4_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_jrbc5_uvd_jrbc_dec +//UVD_JRBC5_UVD_JRBC_RB_WPTR +#define UVD_JRBC5_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC5_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC5_UVD_JRBC_RB_CNTL +#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L +#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC5_UVD_JRBC_IB_SIZE +#define UVD_JRBC5_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC5_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC5_UVD_JRBC_URGENT_CNTL +#define UVD_JRBC5_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC5_UVD_JRBC_RB_REF_DATA 
+#define UVD_JRBC5_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER +#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC5_UVD_JRBC_SOFT_RESET +#define UVD_JRBC5_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC5_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC5_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC5_UVD_JRBC_STATUS +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC5_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC5_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC5_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC5_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC5_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC5_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC5_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L +#define UVD_JRBC5_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC5_UVD_JRBC_RB_RPTR +#define UVD_JRBC5_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC5_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS +#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define 
UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS +#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE +#define UVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER +#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC5_UVD_JRBC_IB_REF_DATA +#define UVD_JRBC5_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC5_UVD_JPEG_PREEMPT_CMD +#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0 +#define UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1 +#define UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC5_UVD_JRBC_RB_SIZE +#define UVD_JRBC5_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC5_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC5_UVD_JRBC_SCRATCH0 +#define UVD_JRBC5_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC5_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_jrbc6_uvd_jrbc_dec +//UVD_JRBC6_UVD_JRBC_RB_WPTR +#define UVD_JRBC6_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC6_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC6_UVD_JRBC_RB_CNTL +#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L +#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC6_UVD_JRBC_IB_SIZE +#define UVD_JRBC6_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC6_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 
0x007FFFF0L +//UVD_JRBC6_UVD_JRBC_URGENT_CNTL +#define UVD_JRBC6_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC6_UVD_JRBC_RB_REF_DATA +#define UVD_JRBC6_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER +#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC6_UVD_JRBC_SOFT_RESET +#define UVD_JRBC6_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC6_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC6_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC6_UVD_JRBC_STATUS +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC6_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC6_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC6_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC6_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC6_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC6_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC6_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L +#define UVD_JRBC6_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC6_UVD_JRBC_RB_RPTR +#define UVD_JRBC6_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC6_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS +#define 
UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS +#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE +#define UVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER +#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC6_UVD_JRBC_IB_REF_DATA +#define UVD_JRBC6_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC6_UVD_JPEG_PREEMPT_CMD +#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0 +#define UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1 +#define UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC6_UVD_JRBC_RB_SIZE +#define UVD_JRBC6_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC6_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC6_UVD_JRBC_SCRATCH0 +#define UVD_JRBC6_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC6_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_jrbc7_uvd_jrbc_dec +//UVD_JRBC7_UVD_JRBC_RB_WPTR +#define UVD_JRBC7_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC7_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC7_UVD_JRBC_RB_CNTL +#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 
0x00000001L +#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC7_UVD_JRBC_IB_SIZE +#define UVD_JRBC7_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC7_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC7_UVD_JRBC_URGENT_CNTL +#define UVD_JRBC7_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC7_UVD_JRBC_RB_REF_DATA +#define UVD_JRBC7_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER +#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC7_UVD_JRBC_SOFT_RESET +#define UVD_JRBC7_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC7_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC7_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC7_UVD_JRBC_STATUS +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC7_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC7_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC7_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC7_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC7_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC7_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC7_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L 
+#define UVD_JRBC7_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC7_UVD_JRBC_RB_RPTR +#define UVD_JRBC7_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC7_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS +#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS +#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE +#define UVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER +#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC7_UVD_JRBC_IB_REF_DATA +#define UVD_JRBC7_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC7_UVD_JPEG_PREEMPT_CMD +#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0 +#define UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1 +#define UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC7_UVD_JRBC_RB_SIZE +#define UVD_JRBC7_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC7_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC7_UVD_JRBC_SCRATCH0 +#define UVD_JRBC7_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC7_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: aid_uvd0_uvd_jmi1_uvd_jmi_dec +//UVD_JMI1_UVD_JPEG_DEC_PF_CTRL +#define UVD_JMI1_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0 +#define UVD_JMI1_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1 
+#define UVD_JMI1_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L +#define UVD_JMI1_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L +//UVD_JMI1_UVD_LMI_JRBC_CTRL +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI1_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI1_UVD_LMI_JPEG_CTRL +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI1_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI1_JPEG_LMI_DROP +#define UVD_JMI1_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0 +#define UVD_JMI1_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1 +#define UVD_JMI1_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2 +#define UVD_JMI1_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3 +#define UVD_JMI1_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4 +#define UVD_JMI1_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L +#define UVD_JMI1_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L +#define UVD_JMI1_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L +#define UVD_JMI1_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L +#define UVD_JMI1_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L +//UVD_JMI1_UVD_LMI_JRBC_IB_VMID +#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI1_UVD_LMI_JRBC_RB_VMID +#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI1_UVD_LMI_JPEG_VMID +#define UVD_JMI1_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4 +#define UVD_JMI1_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8 +#define UVD_JMI1_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL +#define 
UVD_JMI1_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L +#define UVD_JMI1_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L +//UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW +#define UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH +#define UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID +#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L +//UVD_JMI1_UVD_JMI_ATOMIC_CNTL +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0 +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1 +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5 +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6 +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7 +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L +#define 
UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L +//UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW +#define UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH +#define UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW +#define UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH +#define UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW +#define UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH +#define UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW +#define UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH +#define UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI1_UVD_JMI_ATOMIC_CNTL2 +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10 +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18 +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L +#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L + + +// addressBlock: aid_uvd0_uvd_jmi2_uvd_jmi_dec +//UVD_JMI2_UVD_JPEG_DEC_PF_CTRL +#define UVD_JMI2_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0 +#define UVD_JMI2_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1 +#define UVD_JMI2_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L +#define UVD_JMI2_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L +//UVD_JMI2_UVD_LMI_JRBC_CTRL +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define 
UVD_JMI2_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI2_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI2_UVD_LMI_JPEG_CTRL +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI2_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI2_JPEG_LMI_DROP +#define UVD_JMI2_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0 +#define UVD_JMI2_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1 +#define UVD_JMI2_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2 +#define UVD_JMI2_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3 +#define UVD_JMI2_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4 +#define UVD_JMI2_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L +#define UVD_JMI2_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L +#define UVD_JMI2_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L +#define UVD_JMI2_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L +#define UVD_JMI2_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L +//UVD_JMI2_UVD_LMI_JRBC_IB_VMID +#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI2_UVD_LMI_JRBC_RB_VMID +#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI2_UVD_LMI_JPEG_VMID +#define UVD_JMI2_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4 +#define UVD_JMI2_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8 +#define UVD_JMI2_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL +#define UVD_JMI2_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L +#define UVD_JMI2_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L +//UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW +#define UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL 
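/*
 * Illustrative sketch only, not part of this patch: as the BITS_31_0 /
 * BITS_63_32 field names above indicate, 64-bit GPU addresses are split
 * across each *_64BIT_BAR_LOW / *_64BIT_BAR_HIGH register pair. The
 * helper below is hypothetical and shows only that split convention.
 */
#include <stdint.h>

struct bar_pair {
	uint32_t low;	/* BITS_31_0 of the 64-bit address */
	uint32_t high;	/* BITS_63_32 of the 64-bit address */
};

static inline struct bar_pair split_gpu_addr(uint64_t addr)
{
	struct bar_pair p = {
		.low  = (uint32_t)(addr & 0xFFFFFFFFULL),
		.high = (uint32_t)(addr >> 32),
	};
	return p;
}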
+//UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH +#define UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID +#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L +//UVD_JMI2_UVD_JMI_ATOMIC_CNTL +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0 +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1 +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5 +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6 +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7 +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L +//UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW +#define UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH +#define UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW +#define UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define 
UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH +#define UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW +#define UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH +#define UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW +#define UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH +#define UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI2_UVD_JMI_ATOMIC_CNTL2 +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10 +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18 +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L +#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L + + +// addressBlock: aid_uvd0_uvd_jmi3_uvd_jmi_dec +//UVD_JMI3_UVD_JPEG_DEC_PF_CTRL +#define UVD_JMI3_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0 +#define UVD_JMI3_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1 +#define UVD_JMI3_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L +#define UVD_JMI3_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L +//UVD_JMI3_UVD_LMI_JRBC_CTRL +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI3_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI3_UVD_LMI_JPEG_CTRL +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define 
UVD_JMI3_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI3_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI3_JPEG_LMI_DROP +#define UVD_JMI3_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0 +#define UVD_JMI3_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1 +#define UVD_JMI3_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2 +#define UVD_JMI3_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3 +#define UVD_JMI3_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4 +#define UVD_JMI3_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L +#define UVD_JMI3_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L +#define UVD_JMI3_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L +#define UVD_JMI3_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L +#define UVD_JMI3_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L +//UVD_JMI3_UVD_LMI_JRBC_IB_VMID +#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI3_UVD_LMI_JRBC_RB_VMID +#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI3_UVD_LMI_JPEG_VMID +#define UVD_JMI3_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4 +#define UVD_JMI3_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8 +#define UVD_JMI3_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL +#define UVD_JMI3_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L +#define UVD_JMI3_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L +//UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW +#define UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH +#define UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID +#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL 
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L +//UVD_JMI3_UVD_JMI_ATOMIC_CNTL +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0 +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1 +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5 +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6 +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7 +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L +//UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW +#define UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH +#define UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW +#define UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH +#define UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW +#define UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH +#define UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW +#define UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define 
UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH +#define UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI3_UVD_JMI_ATOMIC_CNTL2 +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10 +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18 +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L +#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L + + +// addressBlock: aid_uvd0_uvd_jmi4_uvd_jmi_dec +//UVD_JMI4_UVD_JPEG_DEC_PF_CTRL +#define UVD_JMI4_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0 +#define UVD_JMI4_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1 +#define UVD_JMI4_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L +#define UVD_JMI4_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L +//UVD_JMI4_UVD_LMI_JRBC_CTRL +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI4_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI4_UVD_LMI_JPEG_CTRL +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI4_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI4_JPEG_LMI_DROP +#define UVD_JMI4_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0 +#define UVD_JMI4_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1 +#define UVD_JMI4_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2 +#define UVD_JMI4_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3 +#define UVD_JMI4_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4 +#define UVD_JMI4_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L +#define UVD_JMI4_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L +#define UVD_JMI4_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L +#define UVD_JMI4_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L +#define 
UVD_JMI4_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L +//UVD_JMI4_UVD_LMI_JRBC_IB_VMID +#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI4_UVD_LMI_JRBC_RB_VMID +#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI4_UVD_LMI_JPEG_VMID +#define UVD_JMI4_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4 +#define UVD_JMI4_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8 +#define UVD_JMI4_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL +#define UVD_JMI4_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L +#define UVD_JMI4_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L +//UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW +#define UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH +#define UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID +#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define 
UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L +//UVD_JMI4_UVD_JMI_ATOMIC_CNTL +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0 +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1 +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5 +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6 +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7 +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L +//UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW +#define UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH +#define UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW +#define UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH +#define UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW +#define UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH +#define UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW +#define UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH +#define UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI4_UVD_JMI_ATOMIC_CNTL2 +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10 +#define 
UVD_JMI4_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18 +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L +#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L + + +// addressBlock: aid_uvd0_uvd_jmi5_uvd_jmi_dec +//UVD_JMI5_UVD_JPEG_DEC_PF_CTRL +#define UVD_JMI5_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0 +#define UVD_JMI5_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1 +#define UVD_JMI5_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L +#define UVD_JMI5_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L +//UVD_JMI5_UVD_LMI_JRBC_CTRL +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI5_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI5_UVD_LMI_JPEG_CTRL +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI5_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI5_JPEG_LMI_DROP +#define UVD_JMI5_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0 +#define UVD_JMI5_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1 +#define UVD_JMI5_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2 +#define UVD_JMI5_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3 +#define UVD_JMI5_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4 +#define UVD_JMI5_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L +#define UVD_JMI5_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L +#define UVD_JMI5_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L +#define UVD_JMI5_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L +#define UVD_JMI5_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L +//UVD_JMI5_UVD_LMI_JRBC_IB_VMID +#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI5_UVD_LMI_JRBC_RB_VMID +#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define 
UVD_JMI5_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI5_UVD_LMI_JPEG_VMID +#define UVD_JMI5_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4 +#define UVD_JMI5_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8 +#define UVD_JMI5_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL +#define UVD_JMI5_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L +#define UVD_JMI5_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L +//UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW +#define UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH +#define UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID +#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L +//UVD_JMI5_UVD_JMI_ATOMIC_CNTL +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0 +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1 +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5 +#define 
UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6 +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7 +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L +//UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW +#define UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH +#define UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW +#define UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH +#define UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW +#define UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH +#define UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW +#define UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH +#define UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI5_UVD_JMI_ATOMIC_CNTL2 +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10 +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18 +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L +#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L + + +// addressBlock: aid_uvd0_uvd_jmi6_uvd_jmi_dec +//UVD_JMI6_UVD_JPEG_DEC_PF_CTRL +#define UVD_JMI6_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0 +#define UVD_JMI6_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1 +#define UVD_JMI6_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L +#define UVD_JMI6_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L +//UVD_JMI6_UVD_LMI_JRBC_CTRL +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define 
UVD_JMI6_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI6_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI6_UVD_LMI_JPEG_CTRL +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI6_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI6_JPEG_LMI_DROP +#define UVD_JMI6_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0 +#define UVD_JMI6_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1 +#define UVD_JMI6_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2 +#define UVD_JMI6_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3 +#define UVD_JMI6_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4 +#define UVD_JMI6_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L +#define UVD_JMI6_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L +#define UVD_JMI6_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L +#define UVD_JMI6_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L +#define UVD_JMI6_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L +//UVD_JMI6_UVD_LMI_JRBC_IB_VMID +#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI6_UVD_LMI_JRBC_RB_VMID +#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI6_UVD_LMI_JPEG_VMID +#define UVD_JMI6_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4 +#define UVD_JMI6_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8 +#define UVD_JMI6_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL +#define UVD_JMI6_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L +#define UVD_JMI6_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L +//UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL 
+//UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW +#define UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH +#define UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID +#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L +//UVD_JMI6_UVD_JMI_ATOMIC_CNTL +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0 +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1 +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5 +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6 +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7 +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L +//UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW +#define UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define 
UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH +#define UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW +#define UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH +#define UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW +#define UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH +#define UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW +#define UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH +#define UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI6_UVD_JMI_ATOMIC_CNTL2 +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10 +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18 +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L +#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L + + +// addressBlock: aid_uvd0_uvd_jmi7_uvd_jmi_dec +//UVD_JMI7_UVD_JPEG_DEC_PF_CTRL +#define UVD_JMI7_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0 +#define UVD_JMI7_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1 +#define UVD_JMI7_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L +#define UVD_JMI7_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L +//UVD_JMI7_UVD_LMI_JRBC_CTRL +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI7_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI7_UVD_LMI_JPEG_CTRL +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define 
UVD_JMI7_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI7_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI7_JPEG_LMI_DROP +#define UVD_JMI7_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0 +#define UVD_JMI7_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1 +#define UVD_JMI7_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2 +#define UVD_JMI7_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3 +#define UVD_JMI7_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4 +#define UVD_JMI7_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L +#define UVD_JMI7_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L +#define UVD_JMI7_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L +#define UVD_JMI7_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L +#define UVD_JMI7_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L +//UVD_JMI7_UVD_LMI_JRBC_IB_VMID +#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI7_UVD_LMI_JRBC_RB_VMID +#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI7_UVD_LMI_JPEG_VMID +#define UVD_JMI7_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4 +#define UVD_JMI7_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8 +#define UVD_JMI7_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL +#define UVD_JMI7_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L +#define UVD_JMI7_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L +//UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW +#define UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH +#define UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define 
UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID +#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L +//UVD_JMI7_UVD_JMI_ATOMIC_CNTL +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0 +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1 +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5 +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6 +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7 +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L +//UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW +#define UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH +#define UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW +#define UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH +#define UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW +#define 
UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH +#define UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW +#define UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH +#define UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI7_UVD_JMI_ATOMIC_CNTL2 +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10 +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18 +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L +#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L + + +// addressBlock: uvdctxind +//UVD_CGC_MEM_CTRL +#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN__SHIFT 0x0 +#define UVD_CGC_MEM_CTRL__MPC_LS_EN__SHIFT 0x1 +#define UVD_CGC_MEM_CTRL__MPRD_LS_EN__SHIFT 0x2 +#define UVD_CGC_MEM_CTRL__WCB_LS_EN__SHIFT 0x3 +#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN__SHIFT 0x4 +#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN__SHIFT 0x5 +#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN__SHIFT 0x6 +#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN__SHIFT 0x7 +#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN__SHIFT 0x8 +#define UVD_CGC_MEM_CTRL__SYS_LS_EN__SHIFT 0x9 +#define UVD_CGC_MEM_CTRL__VCPU_LS_EN__SHIFT 0xa +#define UVD_CGC_MEM_CTRL__MIF_LS_EN__SHIFT 0xc +#define UVD_CGC_MEM_CTRL__LCM_LS_EN__SHIFT 0xd +#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN__SHIFT 0xe +#define UVD_CGC_MEM_CTRL__MPC1_LS_EN__SHIFT 0xf +#define UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x10 +#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x14 +#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK 0x00000001L +#define UVD_CGC_MEM_CTRL__MPC_LS_EN_MASK 0x00000002L +#define UVD_CGC_MEM_CTRL__MPRD_LS_EN_MASK 0x00000004L +#define UVD_CGC_MEM_CTRL__WCB_LS_EN_MASK 0x00000008L +#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN_MASK 0x00000010L +#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN_MASK 0x00000020L +#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN_MASK 0x00000040L +#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN_MASK 0x00000080L +#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN_MASK 0x00000100L +#define UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK 0x00000200L +#define UVD_CGC_MEM_CTRL__VCPU_LS_EN_MASK 0x00000400L +#define UVD_CGC_MEM_CTRL__MIF_LS_EN_MASK 0x00001000L +#define UVD_CGC_MEM_CTRL__LCM_LS_EN_MASK 0x00002000L +#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN_MASK 0x00004000L +#define UVD_CGC_MEM_CTRL__MPC1_LS_EN_MASK 0x00008000L +#define UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000F0000L +#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00F00000L +//UVD_CGC_CTRL2 +#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN__SHIFT 0x0 +#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN__SHIFT 0x1 +#define UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT 0x2 +#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK 0x00000001L 
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK 0x00000002L +#define UVD_CGC_CTRL2__GATER_DIV_ID_MASK 0x0000001CL +//UVD_CGC_MEM_DS_CTRL +#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN__SHIFT 0x0 +#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN__SHIFT 0x1 +#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN__SHIFT 0x2 +#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN__SHIFT 0x3 +#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN__SHIFT 0x4 +#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN__SHIFT 0x5 +#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN__SHIFT 0x6 +#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN__SHIFT 0x7 +#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN__SHIFT 0x8 +#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN__SHIFT 0x9 +#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN__SHIFT 0xa +#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN__SHIFT 0xc +#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN__SHIFT 0xd +#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN__SHIFT 0xe +#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN__SHIFT 0xf +#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN_MASK 0x00000001L +#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN_MASK 0x00000002L +#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN_MASK 0x00000004L +#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN_MASK 0x00000008L +#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN_MASK 0x00000010L +#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN_MASK 0x00000020L +#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN_MASK 0x00000040L +#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN_MASK 0x00000080L +#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN_MASK 0x00000100L +#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN_MASK 0x00000200L +#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN_MASK 0x00000400L +#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN_MASK 0x00001000L +#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN_MASK 0x00002000L +#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN_MASK 0x00004000L +#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN_MASK 0x00008000L +//UVD_CGC_MEM_SD_CTRL +#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN__SHIFT 0x0 +#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN__SHIFT 0x1 +#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN__SHIFT 0x2 +#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN__SHIFT 0x3 +#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN__SHIFT 0x4 +#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN__SHIFT 0x5 +#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN__SHIFT 0x6 +#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN__SHIFT 0x7 +#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN__SHIFT 0x8 +#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN__SHIFT 0x9 +#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN__SHIFT 0xa +#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN__SHIFT 0xc +#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN__SHIFT 0xd +#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN__SHIFT 0xe +#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN__SHIFT 0xf +#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN_MASK 0x00000001L +#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN_MASK 0x00000002L +#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN_MASK 0x00000004L +#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN_MASK 0x00000008L +#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN_MASK 0x00000010L +#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN_MASK 0x00000020L +#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN_MASK 0x00000040L +#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN_MASK 0x00000080L +#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN_MASK 0x00000100L +#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN_MASK 0x00000200L +#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN_MASK 0x00000400L +#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN_MASK 0x00001000L +#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN_MASK 0x00002000L +#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN_MASK 0x00004000L +#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN_MASK 0x00008000L +//UVD_SW_SCRATCH_00 +#define 
UVD_SW_SCRATCH_00__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_00__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_01 +#define UVD_SW_SCRATCH_01__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_01__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_02 +#define UVD_SW_SCRATCH_02__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_02__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_03 +#define UVD_SW_SCRATCH_03__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_03__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_04 +#define UVD_SW_SCRATCH_04__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_04__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_05 +#define UVD_SW_SCRATCH_05__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_05__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_06 +#define UVD_SW_SCRATCH_06__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_06__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_07 +#define UVD_SW_SCRATCH_07__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_07__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_08 +#define UVD_SW_SCRATCH_08__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_08__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_09 +#define UVD_SW_SCRATCH_09__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_09__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_10 +#define UVD_SW_SCRATCH_10__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_10__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_11 +#define UVD_SW_SCRATCH_11__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_11__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_12 +#define UVD_SW_SCRATCH_12__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_12__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_13 +#define UVD_SW_SCRATCH_13__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_13__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_14 +#define UVD_SW_SCRATCH_14__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_14__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_15 +#define UVD_SW_SCRATCH_15__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_15__DATA_MASK 0xFFFFFFFFL +//UVD_IH_SEM_CTRL +#define UVD_IH_SEM_CTRL__IH_STALL_EN__SHIFT 0x0 +#define UVD_IH_SEM_CTRL__SEM_STALL_EN__SHIFT 0x1 +#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN__SHIFT 0x2 +#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN__SHIFT 0x3 +#define UVD_IH_SEM_CTRL__IH_VMID__SHIFT 0x4 +#define UVD_IH_SEM_CTRL__IH_USER_DATA__SHIFT 0x8 +#define UVD_IH_SEM_CTRL__IH_RINGID__SHIFT 0x14 +#define UVD_IH_SEM_CTRL__IH_STALL_EN_MASK 0x00000001L +#define UVD_IH_SEM_CTRL__SEM_STALL_EN_MASK 0x00000002L +#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L +#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN_MASK 0x00000008L +#define UVD_IH_SEM_CTRL__IH_VMID_MASK 0x000000F0L +#define UVD_IH_SEM_CTRL__IH_USER_DATA_MASK 0x000FFF00L +#define UVD_IH_SEM_CTRL__IH_RINGID_MASK 0x0FF00000L + + +// addressBlock: lmi_adp_indirect +//UVD_LMI_CRC0 +#define UVD_LMI_CRC0__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC0__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC1 +#define UVD_LMI_CRC1__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC1__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC2 +#define UVD_LMI_CRC2__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC2__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC3 +#define UVD_LMI_CRC3__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC3__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC10 +#define UVD_LMI_CRC10__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC10__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC11 +#define UVD_LMI_CRC11__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC11__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC12 +#define UVD_LMI_CRC12__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC12__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC13 +#define UVD_LMI_CRC13__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC13__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC14 +#define UVD_LMI_CRC14__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC14__CRC32_MASK 0xFFFFFFFFL 
+//UVD_LMI_CRC15 +#define UVD_LMI_CRC15__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC15__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_SWAP_CNTL2 +#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP__SHIFT 0x0 +#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP__SHIFT 0x2 +#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x4 +#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP__SHIFT 0xc +#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP__SHIFT 0xe +#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP_MASK 0x00000003L +#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP_MASK 0x0000000CL +#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP_MASK 0x00000FF0L +#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP_MASK 0x00003000L +#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP_MASK 0x0000C000L +//UVD_MEMCHECK_SYS_INT_EN +#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN__SHIFT 0x0 +#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN__SHIFT 0x1 +#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN__SHIFT 0x2 +#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN__SHIFT 0x3 +#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN__SHIFT 0x4 +#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN__SHIFT 0x5 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6 +#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN__SHIFT 0x7 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb +#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN__SHIFT 0xc +#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN__SHIFT 0xf +#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN__SHIFT 0x10 +#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN__SHIFT 0x11 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12 +#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13 +#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14 +#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15 +#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x1b +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1c +#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1d +#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1e +#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN__SHIFT 0x1f +#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN_MASK 0x00000001L +#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN_MASK 0x00000002L +#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN_MASK 0x00000004L +#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN_MASK 0x00000008L +#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN_MASK 0x00000010L +#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN_MASK 0x00000020L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L +#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN_MASK 0x00000080L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L +#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN_MASK 0x00001000L +#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L +#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L +#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L +#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L +#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L +#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L 
+#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN_MASK 0x08000000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN_MASK 0x10000000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN_MASK 0x20000000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x40000000L +#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN_MASK 0x80000000L +//UVD_MEMCHECK_SYS_INT_STAT +#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR__SHIFT 0x0 +#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR__SHIFT 0x1 +#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR__SHIFT 0x2 +#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR__SHIFT 0x3 +#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR__SHIFT 0x4 +#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR__SHIFT 0x5 +#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR__SHIFT 0x6 +#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR__SHIFT 0x7 +#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR__SHIFT 0x8 +#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR__SHIFT 0x9 +#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR__SHIFT 0xa +#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR__SHIFT 0xb +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd +#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR__SHIFT 0xe +#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR__SHIFT 0xf +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17 +#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR__SHIFT 0x18 +#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR__SHIFT 0x19 +#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e +#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f +#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR_MASK 0x00000001L +#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR_MASK 0x00000002L +#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR_MASK 0x00000004L +#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR_MASK 0x00000008L +#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR_MASK 0x00000010L +#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR_MASK 0x00000020L +#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR_MASK 0x00000040L +#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR_MASK 0x00000080L +#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR_MASK 0x00000100L +#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR_MASK 0x00000200L +#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR_MASK 0x00000400L +#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR_MASK 0x00000800L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L +#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L +#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR_MASK 
0x00200000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L +#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR_MASK 0x01000000L +#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR_MASK 0x02000000L +#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L +#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L +//UVD_MEMCHECK_SYS_INT_ACK +#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK__SHIFT 0x0 +#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK__SHIFT 0x1 +#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK__SHIFT 0x2 +#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK__SHIFT 0x3 +#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK__SHIFT 0x4 +#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK__SHIFT 0x5 +#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK__SHIFT 0x6 +#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK__SHIFT 0x7 +#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK__SHIFT 0x8 +#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK__SHIFT 0x9 +#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK__SHIFT 0xa +#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK__SHIFT 0xb +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd +#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK__SHIFT 0xe +#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK__SHIFT 0xf +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17 +#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK__SHIFT 0x18 +#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK__SHIFT 0x19 +#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e +#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f +#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK_MASK 0x00000001L +#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK_MASK 0x00000002L +#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK_MASK 0x00000004L +#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK_MASK 0x00000008L +#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK_MASK 0x00000010L +#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK_MASK 0x00000020L +#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK_MASK 0x00000040L +#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK_MASK 0x00000080L +#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK_MASK 0x00000100L +#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK_MASK 0x00000200L +#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK_MASK 0x00000400L +#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK_MASK 0x00000800L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L +#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L +#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L +#define 
UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L +#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK_MASK 0x01000000L +#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK_MASK 0x02000000L +#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L +#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L +//UVD_MEMCHECK_VCPU_INT_EN +#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN__SHIFT 0x0 +#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN__SHIFT 0x1 +#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN__SHIFT 0x2 +#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN__SHIFT 0x3 +#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN__SHIFT 0x4 +#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN__SHIFT 0x5 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6 +#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN__SHIFT 0x7 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb +#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN__SHIFT 0xc +#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN__SHIFT 0xf +#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN__SHIFT 0x10 +#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN__SHIFT 0x11 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12 +#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13 +#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14 +#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15 +#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x19 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1a +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1b +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1c +#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN__SHIFT 0x1d +#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN_MASK 0x00000001L +#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN_MASK 0x00000002L +#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN_MASK 0x00000004L +#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN_MASK 0x00000008L +#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN_MASK 0x00000010L +#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN_MASK 0x00000020L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L +#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN_MASK 0x00000080L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L +#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN_MASK 0x00001000L +#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L +#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L +#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L +#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L +#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L +#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L +#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN_MASK 0x02000000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN_MASK 0x04000000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN_MASK 0x08000000L +#define 
UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x10000000L +#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN_MASK 0x20000000L +//UVD_MEMCHECK_VCPU_INT_STAT +#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR__SHIFT 0x0 +#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR__SHIFT 0x1 +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR__SHIFT 0x2 +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR__SHIFT 0x3 +#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR__SHIFT 0x4 +#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR__SHIFT 0x5 +#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR__SHIFT 0x6 +#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR__SHIFT 0x7 +#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR__SHIFT 0x8 +#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR__SHIFT 0x9 +#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR__SHIFT 0xa +#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR__SHIFT 0xb +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd +#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR__SHIFT 0xe +#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR__SHIFT 0xf +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17 +#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR__SHIFT 0x18 +#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR__SHIFT 0x19 +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f +#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR_MASK 0x00000001L +#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR_MASK 0x00000002L +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR_MASK 0x00000004L +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR_MASK 0x00000008L +#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR_MASK 0x00000010L +#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR_MASK 0x00000020L +#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR_MASK 0x00000040L +#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR_MASK 0x00000080L +#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR_MASK 0x00000100L +#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR_MASK 0x00000200L +#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR_MASK 0x00000400L +#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR_MASK 0x00000800L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L +#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L +#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L +#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR_MASK 0x01000000L +#define 
UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR_MASK 0x02000000L +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L +//UVD_MEMCHECK_VCPU_INT_ACK +#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK__SHIFT 0x0 +#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK__SHIFT 0x1 +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK__SHIFT 0x2 +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK__SHIFT 0x3 +#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK__SHIFT 0x4 +#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK__SHIFT 0x5 +#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK__SHIFT 0x6 +#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK__SHIFT 0x7 +#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK__SHIFT 0x8 +#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK__SHIFT 0x9 +#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK__SHIFT 0xa +#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK__SHIFT 0xb +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd +#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK__SHIFT 0xe +#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK__SHIFT 0xf +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17 +#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK__SHIFT 0x18 +#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK__SHIFT 0x19 +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f +#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK_MASK 0x00000001L +#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK_MASK 0x00000002L +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK_MASK 0x00000004L +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK_MASK 0x00000008L +#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK_MASK 0x00000010L +#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK_MASK 0x00000020L +#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK_MASK 0x00000040L +#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK_MASK 0x00000080L +#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK_MASK 0x00000100L +#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK_MASK 0x00000200L +#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK_MASK 0x00000400L +#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK_MASK 0x00000800L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L +#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L +#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L +#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK_MASK 0x01000000L 
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK_MASK 0x02000000L +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L +//UVD_MEMCHECK2_SYS_INT_STAT +#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0 +#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1 +#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2 +#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5 +#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6 +#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7 +#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8 +#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9 +#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa +#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb +#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10 +#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x16 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x17 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x18 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x19 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x1a +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x1b +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x1c +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x1d +#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR__SHIFT 0x1e +#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR__SHIFT 0x1f +#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L +#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L +#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L +#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L +#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L +#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L +#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L +#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L +#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L +#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L +#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L +#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00400000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00800000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x01000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x02000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x04000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x08000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x10000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x20000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR_MASK 0x40000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR_MASK 0x80000000L +//UVD_MEMCHECK2_SYS_INT_ACK +#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0 +#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1 +#define 
UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2 +#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5 +#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6 +#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7 +#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8 +#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9 +#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa +#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb +#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10 +#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x16 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x17 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x18 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x19 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x1a +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x1b +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x1c +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x1d +#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK__SHIFT 0x1e +#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK__SHIFT 0x1f +#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L +#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L +#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L +#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L +#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L +#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L +#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L +#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L +#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L +#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L +#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L +#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00400000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00800000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x01000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x02000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x04000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x08000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x10000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x20000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK_MASK 0x40000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK_MASK 0x80000000L +//UVD_MEMCHECK2_VCPU_INT_STAT +#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0 +#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1 +#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2 +#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5 +#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6 +#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7 +#define 
UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9 +#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa +#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb +#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10 +#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x12 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x13 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x14 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x15 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x16 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x17 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x18 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x19 +#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR__SHIFT 0x1a +#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR__SHIFT 0x1b +#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L +#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L +#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L +#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L +#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L +#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L +#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L +#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L +#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00040000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00080000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x00100000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x00200000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x00400000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x00800000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x01000000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x02000000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR_MASK 0x04000000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR_MASK 0x08000000L +//UVD_MEMCHECK2_VCPU_INT_ACK +#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0 +#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1 +#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2 +#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5 +#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6 +#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9 +#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa +#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb +#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10 +#define 
UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x12 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x13 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x14 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x15 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x16 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x17 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x18 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x19 +#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK__SHIFT 0x1a +#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK__SHIFT 0x1b +#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L +#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L +#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L +#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L +#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L +#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L +#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L +#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L +#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00040000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00080000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x00100000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x00200000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x00400000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x00800000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x01000000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x02000000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK_MASK 0x04000000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK_MASK 0x08000000L + + +#endif From 76e5e4c70160b3764db6093b3366b36e4466d727 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Thu, 6 Jan 2022 14:41:33 -0500 Subject: [PATCH 251/861] drm/amdgpu: add VCN4_0_3 firmware Add VCN4_0_3 firmware. 
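Register the vcn_4_0_3.bin binary with MODULE_FIRMWARE() and keep indirect SRAM loading disabled on VCN 4.0.3 for now.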
v2: fix fw name (Alex) Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e63fcc58e8e0..72eb12aa2e8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -56,6 +56,7 @@ #define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin" #define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin" #define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin" +#define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin" #define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); @@ -77,6 +78,7 @@ MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP); MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2); MODULE_FIRMWARE(FIRMWARE_VCN4_0_0); MODULE_FIRMWARE(FIRMWARE_VCN4_0_2); +MODULE_FIRMWARE(FIRMWARE_VCN4_0_3); MODULE_FIRMWARE(FIRMWARE_VCN4_0_4); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -111,9 +113,18 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; i++) atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(4, 0, 3): + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = false; + break; + default: + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; + } /* * Some Steam Deck's BIOS versions are incompatible with the From e684e654eba9481a9f462a7bbf5c385b7d1c076e Mon Sep 17 00:00:00 2001 From: James Zhu Date: Thu, 6 Jan 2022 17:04:42 -0500 Subject: [PATCH 252/861] drm/amdgpu/jpeg: add jpeg support for VCN4_0_3 Add jpeg support for VCN4_0_3. v2: squash in delayed work typo fix (Alex) Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 762 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 49 ++ 3 files changed, 813 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c create mode 100644 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7fb09000efc0..fc20aee6694f 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -188,7 +188,8 @@ amdgpu-y += \ jpeg_v2_0.o \ jpeg_v2_5.o \ jpeg_v3_0.o \ - jpeg_v4_0.o + jpeg_v4_0.o \ + jpeg_v4_0_3.o # add ATHUB block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c new file mode 100644 index 000000000000..1fc72f9b52ed --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -0,0 +1,762 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "soc15.h" +#include "soc15d.h" +#include "jpeg_v4_0_3.h" + +#include "vcn/vcn_4_0_3_offset.h" +#include "vcn/vcn_4_0_3_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +enum jpeg_engin_status { + UVD_PGFSM_STATUS__UVDJ_PWR_ON = 0, + UVD_PGFSM_STATUS__UVDJ_PWR_OFF = 2, +}; + +static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev); +static int jpeg_v4_0_3_set_powergating_state(void *handle, + enum amd_powergating_state state); + +/** + * jpeg_v4_0_3_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int jpeg_v4_0_3_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + jpeg_v4_0_3_set_dec_ring_funcs(adev); + jpeg_v4_0_3_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v4_0_3_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int jpeg_v4_0_3_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int r; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq); + if (r) + return r; + + r = amdgpu_jpeg_sw_init(adev); + if (r) + return r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + ring = &adev->jpeg.inst->ring_dec; + ring->use_doorbell = false; + ring->vm_hub = AMDGPU_MMHUB0(0); + sprintf(ring->name, "jpeg_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + + return 0; +} + +/** + * jpeg_v4_0_3_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG suspend and free up sw allocation + */ +static int jpeg_v4_0_3_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_jpeg_suspend(adev); + if (r) + return r; + + r = amdgpu_jpeg_sw_fini(adev); + + return r; +} + +/** + * jpeg_v4_0_3_hw_init - start and test JPEG block + * + * @handle: amdgpu_device pointer + * + */ +static int jpeg_v4_0_3_hw_init(void 
*handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; + + r = amdgpu_ring_test_helper(ring); + if (!r) + DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n"); + + return r; +} + +/** + * jpeg_v4_0_3_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the JPEG block, mark ring as not ready any more + */ +static int jpeg_v4_0_3_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + cancel_delayed_work_sync(&adev->jpeg.idle_work); + + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE) + jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE); + + return 0; +} + +/** + * jpeg_v4_0_3_suspend - suspend JPEG block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend JPEG block + */ +static int jpeg_v4_0_3_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = jpeg_v4_0_3_hw_fini(adev); + if (r) + return r; + + r = amdgpu_jpeg_suspend(adev); + + return r; +} + +/** + * jpeg_v4_0_3_resume - resume JPEG block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init JPEG block + */ +static int jpeg_v4_0_3_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + r = jpeg_v4_0_3_hw_init(adev); + + return r; +} + +static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); + data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); +} + +static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); + data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); +} + +/** + * jpeg_v4_0_3_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v4_0_3_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + + WREG32_SOC15(JPEG, 0, regUVD_PGFSM_CONFIG, + 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDJ_PWR_ON << + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + 
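+	/*
+	 * Bring-up order from here: disable CGC so all clocks run, program
+	 * the MJPEG tiling registers, release the JMI soft reset, enable
+	 * the JRBC system interrupt, then point the JRBC ring registers at
+	 * the ring buffer and clear its read/write pointers.
+	 */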
/* JPEG disable CGC */ + jpeg_v4_0_3_disable_clock_gating(adev); + + /* MJPEG global tiling registers */ + WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC0_MASK, + ~JPEG_SYS_INT_EN__DJRBC0_MASK); + + WREG32_SOC15(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR); + + return 0; +} + +/** + * jpeg_v4_0_3_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v4_0_3_stop(struct amdgpu_device *adev) +{ + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + jpeg_v4_0_3_enable_clock_gating(adev); + + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + WREG32_SOC15(JPEG, 0, regUVD_PGFSM_CONFIG, + 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDJ_PWR_OFF << + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + + return 0; +} + +/** + * jpeg_v4_0_3_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v4_0_3_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v4_0_3_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(JPEG, ring->me, + regUVD_JRBC0_UVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +/** + * jpeg_v4_0_3_dec_ring_insert_start - insert a start command + * + * @ring: amdgpu_ring pointer + * + * Write a start command 
to the ring. + */ +static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x62a04);/* TODO: PCTL0_MMHUB_DEEPSLEEP_IB */ + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80004000); +} + +/** + * jpeg_v4_0_3_dec_ring_insert_end - insert a end command + * + * @ring: amdgpu_ring pointer + * + * Write a end command to the ring. + */ +static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x62a04); + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00004000); +} + +/** + * jpeg_v4_0_3_dec_ring_emit_fence - emit an fence & trap command + * + * @ring: amdgpu_ring pointer + * @addr: address + * @seq: sequence number + * @flags: fence related flags + * + * Write a fence and a trap command to the ring. + */ +static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned int flags) +{ + WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, + 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x3fbc); + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x1); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); + amdgpu_ring_write(ring, 0); +} + +/** + * jpeg_v4_0_3_dec_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from + * @ib: indirect buffer to execute + * @flags: unused + * + * Write ring commands to execute the indirect buffer. 
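+ *
+ * Each step is a PACKETJ() register/value pair: program the IB and JPEG
+ * VMIDs, the IB base address and size, then poll UVD_JRBC_STATUS via a
+ * conditional packet until the engine signals completion.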
+ */ +static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned int vmid = AMDGPU_JOB_GET_VMID(job); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_IB_SIZE_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, ib->length_dw); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x2); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_STATUS_INTERNAL_OFFSET, + 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); + amdgpu_ring_write(ring, 0x2); +} + +static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, val); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE3)); + } + amdgpu_ring_write(ring, mask); +} + +static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned int vmid, uint64_t pd_addr) +{ + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; + uint32_t data0, data1, mask; + + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* wait for register write */ + data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; + data1 = lower_32_bits(pd_addr); + mask = 0xffffffff; + jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask); +} + +static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) +{ + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + if (reg_offset >= 0x10000 && reg_offset <= 
0x105ff) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + } + amdgpu_ring_write(ring, val); +} + +static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + } +} + +static bool jpeg_v4_0_3_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return ((RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS) & + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); +} + +static int jpeg_v4_0_3_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + ret = SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS, + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + return ret; +} + +static int jpeg_v4_0_3_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE); + + if (enable) { + if (!jpeg_v4_0_3_is_idle(handle)) + return -EBUSY; + jpeg_v4_0_3_enable_clock_gating(adev); + } else { + jpeg_v4_0_3_disable_clock_gating(adev); + } + + return 0; +} + +static int jpeg_v4_0_3_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (state == adev->jpeg.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = jpeg_v4_0_3_stop(adev); + else + ret = jpeg_v4_0_3_start(adev); + + if (!ret) + adev->jpeg.cur_state = state; + + return ret; +} + +static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n"); + + switch (entry->src_id) { + case VCN_2_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + break; + default: + DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { + .name = "jpeg_v4_0_3", + .early_init = jpeg_v4_0_3_early_init, + .late_init = NULL, + .sw_init = jpeg_v4_0_3_sw_init, + .sw_fini = jpeg_v4_0_3_sw_fini, + .hw_init = jpeg_v4_0_3_hw_init, + .hw_fini = jpeg_v4_0_3_hw_fini, + .suspend = jpeg_v4_0_3_suspend, + .resume = jpeg_v4_0_3_resume, + .is_idle = jpeg_v4_0_3_is_idle, + .wait_for_idle = jpeg_v4_0_3_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = jpeg_v4_0_3_set_clockgating_state, + .set_powergating_state = jpeg_v4_0_3_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr, + .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr, + .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr, + .emit_frame_size = + 
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */ + 18 + 18 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ + 8 + 16, + .emit_ib_size = 22, /* jpeg_v4_0_3_dec_ring_emit_ib */ + .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib, + .emit_fence = jpeg_v4_0_3_dec_ring_emit_fence, + .emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v4_0_3_dec_ring_nop, + .insert_start = jpeg_v4_0_3_dec_ring_insert_start, + .insert_end = jpeg_v4_0_3_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->ring_dec.funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; + adev->jpeg.inst->ring_dec.me = 0; + DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); +} + +static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = { + .set = jpeg_v4_0_3_set_interrupt_state, + .process = jpeg_v4_0_3_process_interrupt, +}; + +static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->irq.num_types = 1; + adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs; +} + +const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = { + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 4, + .minor = 0, + .rev = 3, + .funcs = &jpeg_v4_0_3_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h new file mode 100644 index 000000000000..ca03d17e13fa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h @@ -0,0 +1,49 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __JPEG_V4_0_3_H__ +#define __JPEG_V4_0_3_H__ + +#define regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff +#define regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x404d +#define regUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x404e +#define regUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x404f +#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ab +#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ac +#define regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40a4 +#define regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40a6 +#define regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40b6 +#define regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40b7 +#define regUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 +#define regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x42d4 +#define regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x42d5 +#define regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 +#define regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 +#define regUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 +#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x4043 + +#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 + +extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block; + +#endif /* __JPEG_V4_0_3_H__ */ From b889ef4ac98837838c38f7b9f72bba2f33ee367d Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 24 May 2022 11:56:44 +0800 Subject: [PATCH 253/861] drm/amdgpu/vcn: add vcn support for VCN4_0_3 Add vcn support for VCN4_0_3. Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 1 + drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 1438 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h | 29 + 3 files changed, 1468 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c create mode 100644 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index fc20aee6694f..594e303084c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -183,6 +183,7 @@ amdgpu-y += \ vcn_v2_5.o \ vcn_v3_0.o \ vcn_v4_0.o \ + vcn_v4_0_3.o \ amdgpu_jpeg.o \ jpeg_v1_0.o \ jpeg_v2_0.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c new file mode 100644 index 000000000000..98bff162f453 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -0,0 +1,1438 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include + +#include "amdgpu.h" +#include "amdgpu_vcn.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" +#include "soc15_hw_ip.h" +#include "vcn_v2_0.h" +#include "vcn_sw_ring.h" + +#include "vcn/vcn_4_0_3_offset.h" +#include "vcn/vcn_4_0_3_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +#define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL +#define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX +#define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA +#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX + +#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 +#define VCN1_VID_SOC_ADDRESS_3_0 0x48300 + +static void vcn_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev); +static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev); +static int vcn_v4_0_3_set_powergating_state(void *handle, + enum amd_powergating_state state); +static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state); + +/** + * vcn_v4_0_3_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int vcn_v4_0_3_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + vcn_v4_0_3_set_dec_ring_funcs(adev); + vcn_v4_0_3_set_irq_funcs(adev); + + return 0; +} + +/** + * vcn_v4_0_3_sw_init - sw init for VCN block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int vcn_v4_0_3_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_ring *ring; + int r; + + r = amdgpu_vcn_sw_init(adev); + if (r) + return r; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + const struct common_firmware_header *hdr; + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + + DRM_DEV_INFO(adev->dev, "Will use PSP to load VCN firmware\n"); + } + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + /* VCN DEC TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq); + if (r) + return r; + + ring = &adev->vcn.inst->ring_dec; + ring->use_doorbell = false; + ring->vm_hub = AMDGPU_MMHUB0(0); + sprintf(ring->name, "vcn_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, + &adev->vcn.inst->sched_score); + if (r) + return r; + + fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) | + cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) | + cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB); + fw_shared->sw_ring.is_enabled = cpu_to_le32(true); + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode; + + return 0; +} + +/** + * vcn_v4_0_3_sw_fini - sw fini for VCN block + * + * @handle: amdgpu_device pointer + * + * VCN suspend and free up sw allocation + */ +static int 
vcn_v4_0_3_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r, idx; + + if (drm_dev_enter(&adev->ddev, &idx)) { + volatile struct amdgpu_fw_shared *fw_shared; + + fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + fw_shared->present_flag_0 = 0; + fw_shared->sw_ring.is_enabled = cpu_to_le32(false); + + drm_dev_exit(idx); + } + + r = amdgpu_vcn_suspend(adev); + if (r) + return r; + + r = amdgpu_vcn_sw_fini(adev); + + return r; +} + +/** + * vcn_v4_0_3_hw_init - start and test VCN block + * + * @handle: amdgpu_device pointer + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vcn_v4_0_3_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; + int r; + + r = amdgpu_ring_test_helper(ring); + + if (!r) + DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully(under %s).\n", + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode"); + + return r; +} + +/** + * vcn_v4_0_3_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the VCN block, mark ring as not ready any more + */ +static int vcn_v4_0_3_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + if (adev->vcn.cur_state != AMD_PG_STATE_GATE) + vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE); + + return 0; +} + +/** + * vcn_v4_0_3_suspend - suspend VCN block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend VCN block + */ +static int vcn_v4_0_3_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = vcn_v4_0_3_hw_fini(adev); + if (r) + return r; + + r = amdgpu_vcn_suspend(adev); + + return r; +} + +/** + * vcn_v4_0_3_resume - resume VCN block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init VCN block + */ +static int vcn_v4_0_3_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + r = vcn_v4_0_3_hw_init(adev); + + return r; +} + +/** + * vcn_v4_0_3_mc_resume - memory controller programming + * + * @adev: amdgpu_device pointer + * + * Let the VCN memory controller know it's offsets + */ +static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev) +{ + uint32_t offset, size; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); + WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst->gpu_addr)); + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst->gpu_addr)); + offset = size; + if (amdgpu_emu_mode == 1) + /* No signed header here */ + WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET0, 0); + else + WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_SIZE0, size); + + /* 
cache window 1: stack */ + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst->gpu_addr + offset)); + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst->gpu_addr + offset)); + WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + + /* cache window 2: context */ + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr)); + WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr)); + WREG32_SOC15(VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, 0, regUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); +} + +/** + * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode + * + * @adev: amdgpu_device pointer + * @indirect: indirectly write sram + * + * Let the VCN memory controller know it's offsets with dpg mode + */ +static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect) +{ + uint32_t offset, size; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (!indirect) { + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } + offset = 0; + } else { + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); + offset = size; + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), + 0, 0, indirect); + } + + if (!indirect) + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + else + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + + /* cache window 1: stack */ + if (!indirect) { + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + 
lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + + /* cache window 2: context */ + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + + /* non-cache window */ + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); +} + +/** + * vcn_v4_0_disable_static_power_gating - disable VCN static power gating + * + * @adev: amdgpu_device pointer + * + * Disable static power gating for VCN block + */ +static void vcn_v4_0_3_disable_static_power_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); + + WREG32_SOC15(VCN, 0, regUVD_PGFSM_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_PGFSM_STATUS, + 
UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF); + } else { + data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); + + WREG32_SOC15(VCN, 0, regUVD_PGFSM_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_PGFSM_STATUS, 0, 0x3F3FFFFF); + } + + data = RREG32_SOC15(VCN, 0, regUVD_POWER_STATUS); + data &= ~0x103; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) + data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | + UVD_POWER_STATUS__UVD_PG_EN_MASK; + + WREG32_SOC15(VCN, 0, regUVD_POWER_STATUS, data); +} + +/** + * vcn_v4_0_3_enable_static_power_gating - enable VCN static power gating + * + * @adev: amdgpu_device pointer + * + * Enable static power gating for VCN block + */ +static void vcn_v4_0_3_enable_static_power_gating(struct amdgpu_device *adev) +{ + uint32_t data; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + /* Before power off, this indicator has to be turned on */ + data = RREG32_SOC15(VCN, 0, regUVD_POWER_STATUS); + data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; + data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; + WREG32_SOC15(VCN, 0, regUVD_POWER_STATUS, data); + + data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); + WREG32_SOC15(VCN, 0, regUVD_PGFSM_CONFIG, data); + + data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT); + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_PGFSM_STATUS, data, 0x3F3FFFFF); + } +} + +/** + * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating + * + * @adev: amdgpu_device pointer + * + * Disable clock gating for VCN block + */ +static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) +{ + 
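+	/*
+	 * Disabling CGC forces the clocks on: select the dynamic clock mode
+	 * in UVD_CGC_CTRL, open the UVD_CGC_GATE and UVD_SUVD_CGC_GATE
+	 * gates, and clear the per-block software gating modes in the CTRL
+	 * registers.
+	 */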
uint32_t data; + + /* VCN disable CGC */ + data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, 0, regUVD_CGC_GATE); + data &= ~(UVD_CGC_GATE__SYS_MASK + | UVD_CGC_GATE__MPEG2_MASK + | UVD_CGC_GATE__REGS_MASK + | UVD_CGC_GATE__RBC_MASK + | UVD_CGC_GATE__LMI_MC_MASK + | UVD_CGC_GATE__LMI_UMC_MASK + | UVD_CGC_GATE__MPC_MASK + | UVD_CGC_GATE__LBSI_MASK + | UVD_CGC_GATE__LRBBM_MASK + | UVD_CGC_GATE__WCB_MASK + | UVD_CGC_GATE__VCPU_MASK + | UVD_CGC_GATE__MMSCH_MASK); + + WREG32_SOC15(VCN, 0, regUVD_CGC_GATE, data); + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_CGC_GATE, 0, 0xFFFFFFFF); + + data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL); + data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK + | UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_GATE); + data |= (UVD_SUVD_CGC_GATE__SRE_MASK + | UVD_SUVD_CGC_GATE__SIT_MASK + | UVD_SUVD_CGC_GATE__SMP_MASK + | UVD_SUVD_CGC_GATE__SCM_MASK + | UVD_SUVD_CGC_GATE__SDB_MASK + | UVD_SUVD_CGC_GATE__SRE_H264_MASK + | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK + | UVD_SUVD_CGC_GATE__SIT_H264_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCM_H264_MASK + | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK + | UVD_SUVD_CGC_GATE__SDB_H264_MASK + | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK + | UVD_SUVD_CGC_GATE__ENT_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK + | UVD_SUVD_CGC_GATE__SITE_MASK + | UVD_SUVD_CGC_GATE__SRE_VP9_MASK + | UVD_SUVD_CGC_GATE__SCM_VP9_MASK + | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK + | UVD_SUVD_CGC_GATE__SDB_VP9_MASK + | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); + WREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_GATE, data); + + data = RREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_CTRL); + data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); + WREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_CTRL, data); +} + +/** + * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode + * + * @adev: amdgpu_device pointer + * @sram_sel: sram select + * @indirect: indirectly write sram + * + * Disable clock gating for VCN block with dpg mode + */ +static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel, + uint8_t indirect) +{ + uint32_t reg_data = 0; + + /* enable sw clock gating control */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + 
UVD_CGC_CTRL__RBC_MODE_MASK |
+		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+		UVD_CGC_CTRL__IDCT_MODE_MASK |
+		UVD_CGC_CTRL__MPRD_MODE_MASK |
+		UVD_CGC_CTRL__MPC_MODE_MASK |
+		UVD_CGC_CTRL__LBSI_MODE_MASK |
+		UVD_CGC_CTRL__LRBBM_MODE_MASK |
+		UVD_CGC_CTRL__WCB_MODE_MASK |
+		UVD_CGC_CTRL__VCPU_MODE_MASK);
+	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+
+	/* turn off clock gating */
+	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);
+
+	/* turn on SUVD clock gating */
+	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+
+	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
+	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+}
+
+/**
+ * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable clock gating for VCN block
+ */
+static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev)
+{
+	uint32_t data;
+
+	/* enable VCN CGC */
+	data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL);
+	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	else
+		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+	WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data);
+
+	data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL);
+	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
+		| UVD_CGC_CTRL__MPEG2_MODE_MASK
+		| UVD_CGC_CTRL__REGS_MODE_MASK
+		| UVD_CGC_CTRL__RBC_MODE_MASK
+		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
+		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+		| UVD_CGC_CTRL__MPC_MODE_MASK
+		| UVD_CGC_CTRL__LBSI_MODE_MASK
+		| UVD_CGC_CTRL__LRBBM_MODE_MASK
+		| UVD_CGC_CTRL__WCB_MODE_MASK
+		| UVD_CGC_CTRL__VCPU_MODE_MASK);
+	WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data);
+
+	data = RREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_CTRL);
+	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
+		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+	WREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_CTRL, data);
+}
+
+/**
+ * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @indirect: indirectly write sram
+ *
+ * Start VCN block with dpg mode
+ */
+static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
+{
+	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+	struct amdgpu_ring *ring;
+	uint32_t tmp;
+
+	/* disable register anti-hang mechanism */
+	WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_POWER_STATUS), 1,
+		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+	/* enable dynamic power gating mode */
+	tmp = RREG32_SOC15(VCN, 0, regUVD_POWER_STATUS);
+	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+	WREG32_SOC15(VCN, 0, regUVD_POWER_STATUS, tmp);
+
+	if (indirect)
+		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;
+
+	/* disable clock gating */
+	vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, indirect);
+
+	/* enable VCPU clock */
+	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+
+	WREG32_SOC15_DPG_MODE(0, 
SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* disable master interrupt */ + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect); + + /* setup regUVD_LMI_CTRL */ + tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + 0x00100000L); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect); + + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_MPC_CNTL), + 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); + + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_MPC_SET_MUXA0), + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_MPC_SET_MUXB0), + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_MPC_SET_MUX), + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); + + vcn_v4_0_3_mc_resume_dpg_mode(adev, indirect); + + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* enable LMI MC and UMC channels */ + tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT; + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect); + + /* enable master interrupt */ + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + VCN, 0, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); + + if (indirect) + psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr, + (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr)); + + ring = &adev->vcn.inst->ring_dec; + fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); + + /* program the RB_BASE for ring buffer */ + WREG32_SOC15(VCN, 0, regUVD_RB_BASE_LO4, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, 0, regUVD_RB_BASE_HI4, + upper_32_bits(ring->gpu_addr)); + + WREG32_SOC15(VCN, 0, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t)); + + /* resetting ring, fw should not check RB ring */ + tmp = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK); + WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(VCN, 0, regUVD_RB_RPTR4, 0); + WREG32_SOC15(VCN, 0, regUVD_RB_WPTR4, 0); + ring->wptr = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR4); + + tmp = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB4_EN_MASK; + WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); + + WREG32_SOC15(VCN, 0, regUVD_SCRATCH2, 0); + + /* Reset FW shared memory RBC WPTR/RPTR */ + fw_shared->rb.rptr = 0; + fw_shared->rb.wptr = lower_32_bits(ring->wptr); + + /*resetting done, fw can check RB ring */ + 
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + + return 0; +} + +/** + * vcn_v4_0_3_start - VCN start + * + * @adev: amdgpu_device pointer + * + * Start VCN block + */ +static int vcn_v4_0_3_start(struct amdgpu_device *adev) +{ + volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_ring *ring; + uint32_t tmp; + int j, k, r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return vcn_v4_0_3_start_dpg_mode(adev, adev->vcn.indirect_sram); + + /* disable VCN power gating */ + vcn_v4_0_3_disable_static_power_gating(adev); + + /* set VCN status busy */ + tmp = RREG32_SOC15(VCN, 0, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, 0, regUVD_STATUS, tmp); + + /*SW clock gating */ + vcn_v4_0_3_disable_clock_gating(adev); + + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + tmp = RREG32_SOC15(VCN, 0, regUVD_SOFT_RESET); + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, 0, regUVD_SOFT_RESET, tmp); + + /* setup regUVD_LMI_CTRL */ + tmp = RREG32_SOC15(VCN, 0, regUVD_LMI_CTRL); + WREG32_SOC15(VCN, 0, regUVD_LMI_CTRL, tmp | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + + /* setup regUVD_MPC_CNTL */ + tmp = RREG32_SOC15(VCN, 0, regUVD_MPC_CNTL); + tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; + tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; + WREG32_SOC15(VCN, 0, regUVD_MPC_CNTL, tmp); + + /* setup UVD_MPC_SET_MUXA0 */ + WREG32_SOC15(VCN, 0, regUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + + /* setup UVD_MPC_SET_MUXB0 */ + WREG32_SOC15(VCN, 0, regUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); + + /* setup UVD_MPC_SET_MUX */ + WREG32_SOC15(VCN, 0, regUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + + vcn_v4_0_3_mc_resume(adev); + + /* VCN global tiling registers */ + WREG32_SOC15(VCN, 0, regUVD_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(VCN, 0, regUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* release VCPU reset to boot */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + for (j = 0; j < 10; ++j) { + uint32_t status; + + for (k = 0; k < 100; ++k) { + status = RREG32_SOC15(VCN, 0, regUVD_STATUS); + if (status & 2) + break; + if (amdgpu_emu_mode == 1) + msleep(500); + else + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_DEV_ERROR(adev->dev, + "VCN decode not responding, trying to reset the 
VCPU!!!\n"); + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + mdelay(10); + r = -1; + } + + if (r) { + DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n"); + return r; + } + + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + ring = &adev->vcn.inst->ring_dec; + + fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); + + /* program the RB_BASE for ring buffer */ + WREG32_SOC15(VCN, 0, regUVD_RB_BASE_LO4, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, 0, regUVD_RB_BASE_HI4, + upper_32_bits(ring->gpu_addr)); + + WREG32_SOC15(VCN, 0, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t)); + + /* resetting ring, fw should not check RB ring */ + tmp = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK); + WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(VCN, 0, regUVD_RB_RPTR4, 0); + WREG32_SOC15(VCN, 0, regUVD_RB_WPTR4, 0); + + tmp = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB4_EN_MASK; + WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); + + ring->wptr = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR4); + fw_shared->rb.wptr = cpu_to_le32(lower_32_bits(ring->wptr)); + fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + + return 0; +} + +/** + * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode + * + * @adev: amdgpu_device pointer + * + * Stop VCN block with dpg mode + */ +static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Wait for power status to be 1 */ + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); + + tmp = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR2); + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_RB_RPTR2, tmp, 0xFFFFFFFF); + + tmp = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR4); + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_RB_RPTR4, tmp, 0xFFFFFFFF); + + SOC15_WAIT_ON_RREG(VCN, 0, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + return 0; +} + +/** + * vcn_v4_0_3_stop - VCN stop + * + * @adev: amdgpu_device pointer + * + * Stop VCN block + */ +static int vcn_v4_0_3_stop(struct amdgpu_device *adev) +{ + uint32_t tmp; + int r = 0; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v4_0_3_stop_dpg_mode(adev); + goto Done; + } + + /* wait for vcn idle */ + r = SOC15_WAIT_ON_RREG(VCN, 0, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); + if (r) + return r; + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, 0, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* stall UMC channel */ + tmp = RREG32_SOC15(VCN, 0, 
regUVD_LMI_CTRL2);
+	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+	WREG32_SOC15(VCN, 0, regUVD_LMI_CTRL2, tmp);
+	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+	r = SOC15_WAIT_ON_RREG(VCN, 0, regUVD_LMI_STATUS, tmp, tmp);
+	if (r)
+		return r;
+
+	/* block VCPU register access */
+	WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_RB_ARB_CTRL),
+		UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+	/* put VCPU into reset */
+	WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL),
+		UVD_VCPU_CNTL__BLK_RST_MASK,
+		~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+	/* disable VCPU clock */
+	WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), 0,
+		~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+	/* reset LMI UMC/LMI/VCPU */
+	tmp = RREG32_SOC15(VCN, 0, regUVD_SOFT_RESET);
+	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+	WREG32_SOC15(VCN, 0, regUVD_SOFT_RESET, tmp);
+
+	tmp = RREG32_SOC15(VCN, 0, regUVD_SOFT_RESET);
+	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+	WREG32_SOC15(VCN, 0, regUVD_SOFT_RESET, tmp);
+
+	tmp = RREG32_SOC15(VCN, 0, regUVD_VCPU_CNTL);
+	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+	WREG32_SOC15(VCN, 0, regUVD_VCPU_CNTL, tmp);
+
+	/* clear VCN status */
+	WREG32_SOC15(VCN, 0, regUVD_STATUS, 0);
+
+	/* apply HW clock gating */
+	vcn_v4_0_3_enable_clock_gating(adev);
+
+	/* enable VCN power gating */
+	vcn_v4_0_3_enable_static_power_gating(adev);
+
+Done:
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_uvd(adev, false);
+
+	return r;
+}
+
+/**
+ * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @new_state: pause state
+ *
+ * Pause dpg mode for VCN block
+ */
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
+		struct dpg_pause_state *new_state)
+{
+
+	return 0;
+}
+
+/**
+ * vcn_v4_0_3_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vcn_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR4);
+}
+
+/**
+ * vcn_v4_0_3_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vcn_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell)
+		return adev->wb.wb[ring->wptr_offs];
+	else
+		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR4);
+}
+
+/**
+ * vcn_v4_0_3_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vcn_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	volatile struct amdgpu_fw_shared *fw_shared;
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		/* whenever we update RBC_RB_WPTR, save a copy of the wptr in shared rb.wptr and SCRATCH2 */
+		fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+		WREG32_SOC15(VCN, ring->me, regUVD_SCRATCH2,
+			lower_32_bits(ring->wptr));
+	}
+
+	if (ring->use_doorbell) {
+		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+	} else {
+		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR4, lower_32_bits(ring->wptr));
+	}
+}
+
+static const struct amdgpu_ring_funcs vcn_v4_0_3_dec_sw_ring_vm_funcs = {
+	.type = 
AMDGPU_RING_TYPE_VCN_DEC, + .align_mask = 0x3f, + .nop = VCN_DEC_SW_CMD_NO_OP, + .get_rptr = vcn_v4_0_3_dec_ring_get_rptr, + .get_wptr = vcn_v4_0_3_dec_ring_get_wptr, + .set_wptr = vcn_v4_0_3_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + + VCN_SW_RING_EMIT_FRAME_SIZE, + .emit_ib_size = 5, /* vcn_dec_sw_ring_emit_ib */ + .emit_ib = vcn_dec_sw_ring_emit_ib, + .emit_fence = vcn_dec_sw_ring_emit_fence, + .emit_vm_flush = vcn_dec_sw_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_dec_sw_ring_test_ring, + .test_ib = amdgpu_vcn_dec_sw_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .insert_end = vcn_dec_sw_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_dec_sw_ring_emit_wreg, + .emit_reg_wait = vcn_dec_sw_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +/** + * vcn_v4_0_3_set_dec_ring_funcs - set dec ring functions + * + * @adev: amdgpu_device pointer + * + * Set decode ring functions + */ +static void vcn_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->vcn.inst->ring_dec.funcs = &vcn_v4_0_3_dec_sw_ring_vm_funcs; + adev->vcn.inst->ring_dec.me = 0; + DRM_DEV_INFO(adev->dev, "VCN decode(Software Ring) is enabled in VM mode\n"); +} + +/** + * vcn_v4_0_3_is_idle - check VCN block is idle + * + * @handle: amdgpu_device pointer + * + * Check whether VCN block is idle + */ +static bool vcn_v4_0_3_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return (RREG32_SOC15(VCN, 0, regUVD_STATUS) == UVD_STATUS__IDLE); +} + +/** + * vcn_v4_0_3_wait_for_idle - wait for VCN block idle + * + * @handle: amdgpu_device pointer + * + * Wait for VCN block idle + */ +static int vcn_v4_0_3_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return SOC15_WAIT_ON_RREG(VCN, 0, regUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); +} + +/** + * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state + * + * @handle: amdgpu_device pointer + * @state: clock gating state + * + * Set VCN block clockgating state + */ +static int vcn_v4_0_3_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false;
+
+	if (enable) {
+		if (RREG32_SOC15(VCN, 0, regUVD_STATUS) != UVD_STATUS__IDLE)
+			return -EBUSY;
+		vcn_v4_0_3_enable_clock_gating(adev);
+	} else {
+		vcn_v4_0_3_disable_clock_gating(adev);
+	}
+
+	return 0;
+}
+
+/**
+ * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+static int vcn_v4_0_3_set_powergating_state(void *handle,
+		enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	if (state == adev->vcn.cur_state)
+		return 0;
+
+	if (state == AMD_PG_STATE_GATE)
+		ret = vcn_v4_0_3_stop(adev);
+	else
+		ret = vcn_v4_0_3_start(adev);
+
+	if (!ret)
+		adev->vcn.cur_state = state;
+
+	return ret;
+}
+
+/**
+ * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block interrupt state
+ */
+static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
+		struct amdgpu_irq_src *source,
+		unsigned int type,
+		enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+/**
+ * vcn_v4_0_3_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @entry: interrupt entry from clients and sources
+ *
+ * Process VCN block interrupt
+ */
+static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
+		struct amdgpu_irq_src *source,
+		struct amdgpu_iv_entry *entry)
+{
+	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
+
+	switch (entry->src_id) {
+	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
+		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
+		break;
+	default:
+		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+			entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
+	.set = vcn_v4_0_3_set_interrupt_state,
+	.process = vcn_v4_0_3_process_interrupt,
+};
+
+/**
+ * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set VCN block interrupt irq functions
+ */
+static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+
+		adev->vcn.inst->irq.num_types = 1;
+		adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
+	}
+}
+
+static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
+	.name = "vcn_v4_0_3",
+	.early_init = vcn_v4_0_3_early_init,
+	.late_init = NULL,
+	.sw_init = vcn_v4_0_3_sw_init,
+	.sw_fini = vcn_v4_0_3_sw_fini,
+	.hw_init = vcn_v4_0_3_hw_init,
+	.hw_fini = vcn_v4_0_3_hw_fini,
+	.suspend = vcn_v4_0_3_suspend,
+	.resume = vcn_v4_0_3_resume,
+	.is_idle = vcn_v4_0_3_is_idle,
+	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
+	.check_soft_reset = NULL,
+	.pre_soft_reset = NULL,
+	.soft_reset = NULL,
+	.post_soft_reset = NULL,
+	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
+	.set_powergating_state = vcn_v4_0_3_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_VCN,
+	.major = 4,
+	.minor = 0,
+	.rev = 3,
+	.funcs = &vcn_v4_0_3_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
new file mode 100644
index 000000000000..0b046114373a
--- /dev/null
+++ 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h @@ -0,0 +1,29 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VCN_V4_0_3_H__ +#define __VCN_V4_0_3_H__ + +extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block; + +#endif /* __VCN_V4_0_3_H__ */ From 380302f8b894a11fcd84a08aadf6a858eb003b0b Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sun, 9 Jan 2022 16:03:25 -0500 Subject: [PATCH 254/861] drm/amdgpu/jpeg: enable jpeg cg for VCN4_0_3 Enable jpeg cg for VCN4_0_3. Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 3221522e71e8..d68ebf6b6856 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1095,7 +1095,8 @@ static int soc15_common_early_init(void *handle) break; case IP_VERSION(9, 4, 3): adev->asic_funcs = &vega20_asic_funcs; - adev->cg_flags = 0; + adev->cg_flags = + AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = 0; break; default: From b7179fc29f50d837090d7b0c1f224e3a116cdcc2 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sun, 9 Jan 2022 16:04:05 -0500 Subject: [PATCH 255/861] drm/amdgpu/jpeg: enable jpeg pg for VCN4_0_3 Enable jpeg pg for VCN4_0_3. Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index d68ebf6b6856..1939e88ef421 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1097,7 +1097,8 @@ static int soc15_common_early_init(void *handle) adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_JPEG_MGCG; - adev->pg_flags = 0; + adev->pg_flags = + AMD_PG_SUPPORT_JPEG; break; default: /* FIXME: not supported yet */ From 342397db6d09068e335d42a97e0879b7f5f99364 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sun, 9 Jan 2022 16:04:41 -0500 Subject: [PATCH 256/861] drm/amdgpu/vcn: enable vcn cg for VCN4_0_3 Enable vcn cg for VCN4_0_3. 
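For context: with AMD_CG_SUPPORT_VCN_MGCG set in adev->cg_flags, the
VCN 4.0.3 clock-gating helpers added earlier in this series select
dynamic (medium-grain) gating in UVD_CGC_CTRL instead of leaving the
block ungated. A condensed sketch of that existing pattern, taken from
vcn_v4_0_3_disable_clock_gating and shown here for illustration only:

	data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data);

Without this flag the helpers fall back to static, always-on clocks.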
Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 1939e88ef421..abc10b20c83c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1096,6 +1096,7 @@ static int soc15_common_early_init(void *handle) case IP_VERSION(9, 4, 3): adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = + AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_JPEG; From ef3aa0b40c4b1f4d8b7db8582833cb61fc673f60 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sun, 9 Jan 2022 16:05:07 -0500 Subject: [PATCH 257/861] drm/amdgpu/vcn: enable vcn pg for VCN4_0_3 Enable vcn pg for VCN4_0_3. Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index abc10b20c83c..ee00302950c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1099,6 +1099,7 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = + AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_JPEG; break; default: From 31c0ec84f92cd40cfae210dac59413372996b5e7 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sun, 9 Jan 2022 16:05:31 -0500 Subject: [PATCH 258/861] drm/amdgpu/vcn: enable vcn DPG mode for VCN4_0_3 Enable vcn DPG mode for VCN4_0_3. Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index ee00302950c2..b7e8af56df84 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1100,6 +1100,7 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; break; default: From bc224553843e526bad4bb91188363aea1664a70d Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 24 May 2022 12:03:03 +0800 Subject: [PATCH 259/861] drm/amdgpu/jpeg: add multiple jpeg rings support Add multiple jpeg rings support. 
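Concretely, struct amdgpu_jpeg_inst now carries an array of decode
rings (ring_dec[AMDGPU_MAX_JPEG_RINGS]) plus a per-ring jpeg_pitch
register, and call sites iterate the new num_jpeg_rings count,
indexing per-ring state by ring->pipe. The teardown path is
representative of the pattern used throughout (condensed from the
sw_fini hunk below):

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
			amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
	}

Hardware that exposes a single JPEG ring only ever touches index 0 of
these arrays, so existing IP versions are unaffected.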
Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 21 +++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 6 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 12 ++++++------ drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 14 +++++++------- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 18 +++++++++--------- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 14 +++++++------- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 16 ++++++++-------- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 16 ++++++++-------- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 12 ++++++------ 10 files changed, 69 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index b07c000fc8ba..388466a5f730 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -45,13 +45,14 @@ int amdgpu_jpeg_sw_init(struct amdgpu_device *adev) int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev) { - int i; + int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { if (adev->jpeg.harvest_config & (1 << i)) continue; - amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec); + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) + amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]); } mutex_destroy(&adev->jpeg.jpeg_pg_lock); @@ -76,13 +77,14 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) struct amdgpu_device *adev = container_of(work, struct amdgpu_device, jpeg.idle_work.work); unsigned int fences = 0; - unsigned int i; + unsigned int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { if (adev->jpeg.harvest_config & (1 << i)) continue; - fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec); + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]); } if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) @@ -122,17 +124,17 @@ int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring) if (amdgpu_sriov_vf(adev)) return 0; - WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); + WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe], 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) return r; - amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0)); + amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]); if (tmp == 0xDEADBEEF) break; udelay(1); @@ -161,8 +163,7 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, ib = &job->ibs[0]; - ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, - PACKETJ_TYPE0); + ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0, 0, PACKETJ_TYPE0); ib->ptr[1] = 0xDEADBEEF; for (i = 2; i < 16; i += 2) { ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); @@ -208,7 +209,7 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) } if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]); if (tmp == 0xDEADBEEF) break; udelay(1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 0ca76f0f23e9..cb6c127ab81d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -27,16 +27,17 @@ #include "amdgpu_ras.h" #define AMDGPU_MAX_JPEG_INSTANCES 2 +#define AMDGPU_MAX_JPEG_RINGS 8 #define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) #define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1) struct amdgpu_jpeg_reg{ - unsigned jpeg_pitch; + unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS]; }; struct amdgpu_jpeg_inst { - struct amdgpu_ring ring_dec; + struct amdgpu_ring ring_dec[AMDGPU_MAX_JPEG_RINGS]; struct amdgpu_irq_src irq; struct amdgpu_jpeg_reg external; }; @@ -48,6 +49,7 @@ struct amdgpu_jpeg_ras { struct amdgpu_jpeg { uint8_t num_jpeg_inst; struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES]; + unsigned num_jpeg_rings; struct amdgpu_jpeg_reg internal; unsigned harvest_config; struct delayed_work idle_work; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1d3b224b8b28..44997c7ee89d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -462,8 +462,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->jpeg.harvest_config & (1 << i)) continue; - if (adev->jpeg.inst[i].ring_dec.sched.ready) - ++num_rings; + for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) + if (adev->jpeg.inst[i].ring_dec[j].sched.ready) + ++num_rings; } ib_start_alignment = 16; ib_size_alignment = 16; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 71fe7f6f9889..1c5b60604a19 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -437,7 +437,7 @@ static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case 126: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + amdgpu_fence_process(adev->jpeg.inst->ring_dec); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -484,7 +484,7 @@ int jpeg_v1_0_sw_init(void *handle) if (r) return r; - ring = &adev->jpeg.inst->ring_dec; + ring = adev->jpeg.inst->ring_dec; ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, @@ -492,7 +492,7 @@ int jpeg_v1_0_sw_init(void *handle) if (r) return r; - adev->jpeg.internal.jpeg_pitch = adev->jpeg.inst->external.jpeg_pitch = + adev->jpeg.internal.jpeg_pitch[0] = adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); return 0; @@ -509,7 +509,7 @@ void jpeg_v1_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_ring_fini(&adev->jpeg.inst[0].ring_dec); + amdgpu_ring_fini(adev->jpeg.inst->ring_dec); } /** @@ -522,7 +522,7 @@ void jpeg_v1_0_sw_fini(void *handle) */ void jpeg_v1_0_start(struct amdgpu_device *adev, int mode) { - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; if (mode == 0) { WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0); @@ -579,7 +579,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->ring_dec.funcs = &jpeg_v1_0_decode_ring_vm_funcs; + adev->jpeg.inst->ring_dec->funcs = &jpeg_v1_0_decode_ring_vm_funcs; DRM_INFO("JPEG decode is enabled in VM mode\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 3a43e42f4834..3aeeceae34a5 
100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -83,7 +83,7 @@ static int jpeg_v2_0_sw_init(void *handle) if (r) return r; - ring = &adev->jpeg.inst->ring_dec; + ring = adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; ring->vm_hub = AMDGPU_MMHUB0(0); @@ -93,8 +93,8 @@ static int jpeg_v2_0_sw_init(void *handle) if (r) return r; - adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); + adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); return 0; } @@ -129,7 +129,7 @@ static int jpeg_v2_0_sw_fini(void *handle) static int jpeg_v2_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -312,7 +312,7 @@ static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device *adev) */ static int jpeg_v2_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; if (adev->pm.dpm_enabled) @@ -729,7 +729,7 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + amdgpu_fence_process(adev->jpeg.inst->ring_dec); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -791,7 +791,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->ring_dec.funcs = &jpeg_v2_0_dec_ring_vm_funcs; + adev->jpeg.inst->ring_dec->funcs = &jpeg_v2_0_dec_ring_vm_funcs; DRM_INFO("JPEG decode is enabled in VM mode\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 259b7ba6a842..b79edb12b90e 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -125,7 +125,7 @@ static int jpeg_v2_5_sw_init(void *handle) if (adev->jpeg.harvest_config & (1 << i)) continue; - ring = &adev->jpeg.inst[i].ring_dec; + ring = adev->jpeg.inst[i].ring_dec; ring->use_doorbell = true; if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) ring->vm_hub = AMDGPU_MMHUB1(0); @@ -138,8 +138,8 @@ static int jpeg_v2_5_sw_init(void *handle) if (r) return r; - adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH); + adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH); } r = amdgpu_jpeg_ras_sw_init(adev); @@ -186,7 +186,7 @@ static int jpeg_v2_5_hw_init(void *handle) if (adev->jpeg.harvest_config & (1 << i)) continue; - ring = &adev->jpeg.inst[i].ring_dec; + ring = adev->jpeg.inst[i].ring_dec; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i); @@ -326,7 +326,7 @@ static int jpeg_v2_5_start(struct amdgpu_device *adev) if (adev->jpeg.harvest_config & (1 << i)) continue; - ring = &adev->jpeg.inst[i].ring_dec; + ring = 
adev->jpeg.inst[i].ring_dec; /* disable anti hang mechanism */ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); @@ -591,7 +591,7 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec); + amdgpu_fence_process(adev->jpeg.inst[ip_instance].ring_dec); break; case VCN_2_6__SRCID_DJPEG0_POISON: case VCN_2_6__SRCID_EJPEG0_POISON: @@ -712,10 +712,10 @@ static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) if (adev->jpeg.harvest_config & (1 << i)) continue; if (adev->asic_type == CHIP_ARCTURUS) - adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs; + adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v2_5_dec_ring_vm_funcs; else /* CHIP_ALDEBARAN */ - adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs; - adev->jpeg.inst[i].ring_dec.me = i; + adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v2_6_dec_ring_vm_funcs; + adev->jpeg.inst[i].ring_dec->me = i; DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i); } } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index c55386c22311..cb5494effc0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -98,7 +98,7 @@ static int jpeg_v3_0_sw_init(void *handle) if (r) return r; - ring = &adev->jpeg.inst->ring_dec; + ring = adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; ring->vm_hub = AMDGPU_MMHUB0(0); @@ -108,8 +108,8 @@ static int jpeg_v3_0_sw_init(void *handle) if (r) return r; - adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); + adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); return 0; } @@ -144,7 +144,7 @@ static int jpeg_v3_0_sw_fini(void *handle) static int jpeg_v3_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -330,7 +330,7 @@ static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev) */ static int jpeg_v3_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; if (adev->pm.dpm_enabled) @@ -527,7 +527,7 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + amdgpu_fence_process(adev->jpeg.inst->ring_dec); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -589,7 +589,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->ring_dec.funcs = &jpeg_v3_0_dec_ring_vm_funcs; + adev->jpeg.inst->ring_dec->funcs = &jpeg_v3_0_dec_ring_vm_funcs; DRM_INFO("JPEG decode is enabled in VM mode\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index d7d5ffc29393..495facb885f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -105,7 +105,7 @@ static int jpeg_v4_0_sw_init(void *handle) if (r) return r; - ring = &adev->jpeg.inst->ring_dec; + ring = adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = amdgpu_sriov_vf(adev) ? (((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1); ring->vm_hub = AMDGPU_MMHUB0(0); @@ -116,8 +116,8 @@ static int jpeg_v4_0_sw_init(void *handle) if (r) return r; - adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); r = amdgpu_jpeg_ras_sw_init(adev); if (r) @@ -156,7 +156,7 @@ static int jpeg_v4_0_sw_fini(void *handle) static int jpeg_v4_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; if (amdgpu_sriov_vf(adev)) { @@ -363,7 +363,7 @@ static int jpeg_v4_0_enable_static_power_gating(struct amdgpu_device *adev) */ static int jpeg_v4_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; if (adev->pm.dpm_enabled) @@ -441,7 +441,7 @@ static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev) table_size = 0; - ring = &adev->jpeg.inst->ring_dec; + ring = adev->jpeg.inst->ring_dec; MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW), @@ -678,7 +678,7 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case VCN_4_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + amdgpu_fence_process(adev->jpeg.inst->ring_dec); break; case VCN_4_0__SRCID_DJPEG0_POISON: case VCN_4_0__SRCID_EJPEG0_POISON: @@ -744,7 +744,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->ring_dec.funcs = &jpeg_v4_0_dec_ring_vm_funcs; + adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_dec_ring_vm_funcs; DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 1fc72f9b52ed..784c83994ca1 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -85,7 +85,7 @@ static int jpeg_v4_0_3_sw_init(void *handle) if (r) return r; - ring = &adev->jpeg.inst->ring_dec; + ring = adev->jpeg.inst->ring_dec; ring->use_doorbell = false; ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); @@ -94,8 +94,8 @@ static int jpeg_v4_0_3_sw_init(void *handle) if (r) return r; - adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); return 0; } @@ -130,7 +130,7 @@ static int jpeg_v4_0_3_sw_fini(void *handle) static int jpeg_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring 
*ring = adev->jpeg.inst->ring_dec; int r; r = amdgpu_ring_test_helper(ring); @@ -254,7 +254,7 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) */ static int jpeg_v4_0_3_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; WREG32_SOC15(JPEG, 0, regUVD_PGFSM_CONFIG, 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); @@ -675,7 +675,7 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + amdgpu_fence_process(adev->jpeg.inst->ring_dec); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", @@ -737,8 +737,8 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->ring_dec.funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; - adev->jpeg.inst->ring_dec.me = 0; + adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; + adev->jpeg.inst->ring_dec->me = 0; DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index f877c39c7cdd..16feb491adf5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -211,7 +211,7 @@ static int vcn_v1_0_hw_init(void *handle) goto done; } - ring = &adev->jpeg.inst->ring_dec; + ring = adev->jpeg.inst->ring_dec; r = amdgpu_ring_test_helper(ring); if (r) goto done; @@ -1304,7 +1304,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK); /* Restore */ - ring = &adev->jpeg.inst->ring_dec; + ring = adev->jpeg.inst->ring_dec; WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | @@ -1802,7 +1802,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) + if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; @@ -1810,7 +1810,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work) adev->vcn.pause_dpg_mode(adev, 0, &new_state); } - fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec); + fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec); fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec); if (fences == 0) { @@ -1832,7 +1832,7 @@ static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring) mutex_lock(&adev->vcn.vcn1_jpeg1_workaround); - if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec)) + if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec)) DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n"); vcn_v1_0_set_pg_for_begin_use(ring, set_clocks); @@ -1864,7 +1864,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) + if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; From db77081fe3c88a31eaade8a9c565c48c4d51b093 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Wed, 19 Jan 2022 23:32:43 -0500 Subject: [PATCH 260/861] drm/amdgpu/jpeg: add 
multiple jpeg rings support for vcn4_0_3 Add multiple jpeg rings support for vcn4_0_3 Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 208 ++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 1 + 2 files changed, 144 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 784c83994ca1..0d3509409d3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -29,7 +29,7 @@ #include "vcn/vcn_4_0_3_offset.h" #include "vcn/vcn_4_0_3_sh_mask.h" -#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" +#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h" enum jpeg_engin_status { UVD_PGFSM_STATUS__UVDJ_PWR_ON = 0, @@ -41,6 +41,17 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev); static int jpeg_v4_0_3_set_powergating_state(void *handle, enum amd_powergating_state state); +static int amdgpu_ih_srcid_jpeg[] = { + VCN_4_0__SRCID__JPEG_DECODE, + VCN_4_0__SRCID__JPEG1_DECODE, + VCN_4_0__SRCID__JPEG2_DECODE, + VCN_4_0__SRCID__JPEG3_DECODE, + VCN_4_0__SRCID__JPEG4_DECODE, + VCN_4_0__SRCID__JPEG5_DECODE, + VCN_4_0__SRCID__JPEG6_DECODE, + VCN_4_0__SRCID__JPEG7_DECODE +}; + /** * jpeg_v4_0_3_early_init - set function pointers * @@ -69,13 +80,15 @@ static int jpeg_v4_0_3_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int r; + int i, r; - /* JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq); - if (r) - return r; + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + amdgpu_ih_srcid_jpeg[i], &adev->jpeg.inst->irq); + if (r) + return r; + } r = amdgpu_jpeg_sw_init(adev); if (r) @@ -85,17 +98,22 @@ static int jpeg_v4_0_3_sw_init(void *handle) if (r) return r; - ring = adev->jpeg.inst->ring_dec; - ring->use_doorbell = false; - ring->vm_hub = AMDGPU_MMHUB0(0); - sprintf(ring->name, "jpeg_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); - if (r) - return r; + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { + ring = &adev->jpeg.inst->ring_dec[i]; + ring->use_doorbell = false; + ring->vm_hub = AMDGPU_MMHUB0(0); + sprintf(ring->name, "jpeg_dec_%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; - adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + adev->jpeg.internal.jpeg_pitch[i] = + regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch[i] = + SOC15_REG_OFFSET1(JPEG, 0, regUVD_JRBC0_UVD_JRBC_SCRATCH0, + (i?(0x40 * i - 0xc80):0)); + } return 0; } @@ -130,14 +148,18 @@ static int jpeg_v4_0_3_sw_fini(void *handle) static int jpeg_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; - int r; + struct amdgpu_ring *ring; + int i, r; - r = amdgpu_ring_test_helper(ring); - if (!r) - DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n"); + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { + ring = &adev->jpeg.inst->ring_dec[i]; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } + DRM_DEV_INFO(adev->dev, "JPEG decode initialized 
successfully.\n"); - return r; + return 0; } /** @@ -150,13 +172,14 @@ static int jpeg_v4_0_3_hw_init(void *handle) static int jpeg_v4_0_3_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; cancel_delayed_work_sync(&adev->jpeg.idle_work); if (adev->jpeg.cur_state != AMD_PG_STATE_GATE) - jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE); + ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE); - return 0; + return ret; } /** @@ -204,6 +227,7 @@ static int jpeg_v4_0_3_resume(void *handle) static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; + int i; data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) @@ -216,16 +240,16 @@ static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); - data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK - | JPEG_CGC_GATE__JPEG2_DEC_MASK - | JPEG_CGC_GATE__JMCIF_MASK - | JPEG_CGC_GATE__JRBBM_MASK); + data &= ~(JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) + data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK << i); WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); } static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data; + int i; data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) @@ -238,10 +262,9 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); - data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK - |JPEG_CGC_GATE__JPEG2_DEC_MASK - |JPEG_CGC_GATE__JMCIF_MASK - |JPEG_CGC_GATE__JRBBM_MASK); + data |= (JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) + data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK << i); WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); } @@ -255,6 +278,7 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) static int jpeg_v4_0_3_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; + int i; WREG32_SOC15(JPEG, 0, regUVD_PGFSM_CONFIG, 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); @@ -280,22 +304,32 @@ static int jpeg_v4_0_3_start(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0, ~UVD_JMI_CNTL__SOFT_RESET_MASK); - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC0_MASK, - ~JPEG_SYS_INT_EN__DJRBC0_MASK); + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { + unsigned int reg_offset = (i?(0x40 * i - 0xc80):0); - WREG32_SOC15(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR); + ring = &adev->jpeg.inst->ring_dec[i]; + + /* enable System Interrupt for JRBC */ + 
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC0_MASK << i, + ~(JPEG_SYS_INT_EN__DJRBC0_MASK << i)); + + WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, + (0x00000001L | 0x00000002L)); + WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, + reg_offset, lower_32_bits(ring->gpu_addr)); + WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + reg_offset, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR, reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR, reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, + 0x00000002L); + WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_SIZE, reg_offset, + ring->ring_size / 4); + ring->wptr = RREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR, + reg_offset); + } return 0; } @@ -342,7 +376,8 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_RPTR); + return RREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_RPTR, + ring->pipe?(0x40 * ring->pipe - 0xc80):0); } /** @@ -359,7 +394,8 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_WPTR); + return RREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_WPTR, + ring->pipe?(0x40 * ring->pipe - 0xc80):0); } /** @@ -377,8 +413,8 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring) adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(JPEG, ring->me, - regUVD_JRBC0_UVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_WPTR, + (ring->pipe?(0x40 * ring->pipe - 0xc80):0), lower_32_bits(ring->wptr)); } } @@ -393,7 +429,7 @@ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x62a04);/* TODO: PCTL0_MMHUB_DEEPSLEEP_IB */ + amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, 0, PACKETJ_TYPE0)); @@ -605,20 +641,36 @@ static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count) static bool jpeg_v4_0_3_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool ret = true; + int i; - return ((RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS) & - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { + unsigned int reg_offset = (i?(0x40 * i - 0xc80):0); + + ret &= ((RREG32_SOC15_OFFSET(JPEG, 0, + regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset) & + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + } + + return ret; } static int jpeg_v4_0_3_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret; + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { + unsigned int reg_offset = (i?(0x40 * i - 0xc80):0); + + ret = SOC15_WAIT_ON_RREG_OFFSET(JPEG, 0, + regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset, + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + if (ret) + return ret; + } - ret = SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS, - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK, - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); return ret; } @@ -626,7 +678,7 @@ static int jpeg_v4_0_3_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE); + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; if (enable) { if (!jpeg_v4_0_3_is_idle(handle)) @@ -674,8 +726,29 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n"); switch (entry->src_id) { - case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(adev->jpeg.inst->ring_dec); + case VCN_4_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec[0]); + break; + case VCN_4_0__SRCID__JPEG1_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec[1]); + break; + case VCN_4_0__SRCID__JPEG2_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec[2]); + break; + case VCN_4_0__SRCID__JPEG3_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec[3]); + break; + case VCN_4_0__SRCID__JPEG4_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec[4]); + break; + case VCN_4_0__SRCID__JPEG5_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec[5]); + break; + case VCN_4_0__SRCID__JPEG6_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec[6]); + break; + case VCN_4_0__SRCID__JPEG7_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec[7]); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", @@ -737,8 +810,13 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; - adev->jpeg.inst->ring_dec->me = 0; + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { + adev->jpeg.inst->ring_dec[i].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; + adev->jpeg.inst->ring_dec[i].me = 0; + adev->jpeg.inst->ring_dec[i].pipe = i; + } DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); } @@ -749,7 +827,7 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = { static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->irq.num_types = 1; + adev->jpeg.inst->irq.num_types = adev->jpeg.num_jpeg_rings; adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h index ca03d17e13fa..70a5f030d5f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h @@ -41,6 +41,7 @@ #define regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 #define regUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 #define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x4043 +#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET 0x4094 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 From bfb44eacb0e2de63bc7824cc590ede51a02a7ded Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Thu, 27 Jan 2022 09:02:16 -0500 Subject: [PATCH 261/861] drm/amdkfd: Set F8_MODE for gc_v9_4_3 Set F8_MODE for GC 9.4.3 as optimal/non-IEEE. Also update gc_v9_0 to gc_v9_4_3 to include more definitions such as the F8_MODE bit, and remove unused header files.
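For reference, a minimal user-space sketch of how the resulting SH_MEM_CONFIG value is composed once the F8_MODE bit is in play. It is illustrative only: the shift positions below are assumed stand-ins rather than the actual gc_9_4_3_sh_mask.h definitions, and build_sh_mem_config() is a hypothetical helper, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions for illustration; the real values live in
 * gc_9_4_3_sh_mask.h.
 */
#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT 12
#define SH_MEM_CONFIG__F8_MODE__SHIFT 23

static uint32_t build_sh_mem_config(bool noretry, bool is_gc_9_4_3)
{
	uint32_t cfg = 0;

	if (noretry)
		cfg |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;

	/* GC 9.4.3 only: select the optimal/non-IEEE FP8 mode */
	if (is_gc_9_4_3)
		cfg |= 1 << SH_MEM_CONFIG__F8_MODE__SHIFT;

	return cfg;
}

int main(void)
{
	printf("sh_mem_config = 0x%08x\n",
	       (unsigned int)build_sh_mem_config(true, true));
	return 0;
}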
v2: fix IP version check (Alex) Signed-off-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index 8b2dd2670ab7..914d94679d73 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -24,9 +24,7 @@ #include "kfd_device_queue_manager.h" #include "vega10_enum.h" -#include "gc/gc_9_0_offset.h" -#include "gc/gc_9_0_sh_mask.h" -#include "sdma0/sdma0_4_0_sh_mask.h" +#include "gc/gc_9_4_3_sh_mask.h" static int update_qpd_v9(struct device_queue_manager *dqm, struct qcm_process_device *qpd); @@ -65,6 +63,10 @@ static int update_qpd_v9(struct device_queue_manager *dqm, if (dqm->dev->noretry && !dqm->dev->use_iommu_v2) qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; + if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3)) + qpd->sh_mem_config |= + (1 << SH_MEM_CONFIG__F8_MODE__SHIFT); + qpd->sh_mem_ape1_limit = 0; qpd->sh_mem_ape1_base = 0; } From d7fd2a9e394b5f2bb900fc4e1e04e8dd4a97a7be Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 29 Jan 2022 10:24:26 -0500 Subject: [PATCH 262/861] drm/amdgpu/nbio: update vcn doorbell range VCN 4.0.3 uses up to 16 doorbells per partition. Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index bdb84a53f0e4..266b504fd83e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -161,7 +161,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do doorbell_range = REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, BIF_DOORBELL0_RANGE_SIZE_ENTRY, - 0x8); + 0x10); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, @@ -174,7 +174,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, - S2A_DOORBELL_PORT1_RANGE_SIZE, 0x8); + S2A_DOORBELL_PORT1_RANGE_SIZE, 0x10); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4); From c21d446ba7a83ab9f15fae8f9c732bb94f8a5677 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 29 Jan 2022 10:28:26 -0500 Subject: [PATCH 263/861] drm/amdgpu/vcn: enable vcn doorbell for vcn4.0.3 Enable vcn doorbell for vcn4.0.3.
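To make the doorbell arithmetic below easier to follow, here is a standalone sketch of the values being programmed. The vcn_ring0_1 slot is an assumed example, and the VCN_RB4_DB_CTRL field layout (OFFSET shift, EN mask) is mirrored with assumed numeric values rather than the register header definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout for illustration only */
#define VCN_RB4_DB_CTRL__OFFSET__SHIFT 2
#define VCN_RB4_DB_CTRL__EN_MASK 0x00000001

int main(void)
{
	uint32_t vcn_ring0_1 = 0x100; /* example doorbell slot from SW init */
	/* decode ring doorbell index, as computed in sw_init below */
	uint32_t dec_doorbell = (vcn_ring0_1 << 1) + 5;
	/* control register value: index in the OFFSET field plus enable bit */
	uint32_t db_ctrl = (dec_doorbell << VCN_RB4_DB_CTRL__OFFSET__SHIFT) |
			   VCN_RB4_DB_CTRL__EN_MASK;

	printf("dec ring doorbell index: 0x%x\n", (unsigned int)dec_doorbell);
	printf("VCN_RB4_DB_CTRL value: 0x%08x\n", (unsigned int)db_ctrl);
	return 0;
}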
Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 98bff162f453..4418c9f05ec4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -109,7 +109,8 @@ static int vcn_v4_0_3_sw_init(void *handle) return r; ring = &adev->vcn.inst->ring_dec; - ring->use_doorbell = false; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 5; ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -174,6 +175,13 @@ static int vcn_v4_0_3_hw_init(void *handle) struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int r; + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ring->doorbell_index, ring->me); + if (ring->use_doorbell) + WREG32_SOC15(VCN, ring->me, regVCN_RB4_DB_CTRL, + ring->doorbell_index << VCN_RB4_DB_CTRL__OFFSET__SHIFT | + VCN_RB4_DB_CTRL__EN_MASK); + r = amdgpu_ring_test_helper(ring); if (!r) From 6ddae0f3ab18a64e83bcf7b090e085394046f130 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 29 Jan 2022 10:34:05 -0500 Subject: [PATCH 264/861] drm/amdgpu/jpeg: enable jpeg doorbell for jpeg4.0.3 Enable jpeg doorbell for jpeg4.0.3. Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 0d3509409d3a..8914f3c6c80f 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -100,8 +100,9 @@ static int jpeg_v4_0_3_sw_init(void *handle) for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { ring = &adev->jpeg.inst->ring_dec[i]; - ring->use_doorbell = false; + ring->use_doorbell = true; ring->vm_hub = AMDGPU_MMHUB0(0); + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + (i?8:1) + i; sprintf(ring->name, "jpeg_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -148,11 +149,19 @@ static int jpeg_v4_0_3_sw_fini(void *handle) static int jpeg_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int i, r; + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { ring = &adev->jpeg.inst->ring_dec[i]; + if (ring->use_doorbell) + WREG32_SOC15_OFFSET(VCN, 0, regVCN_JPEG_DB_CTRL, + (ring->pipe?(ring->pipe - 0x15):0), + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); r = amdgpu_ring_test_helper(ring); if (r) return r; From ae972ed5e0dcb3fab57020e243563cc484b8969a Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 7 Feb 2022 10:25:47 -0500 Subject: [PATCH 265/861] drm/amdgpu: fix vcn doorbell range setting Should use vcn_ring0_1 instead of doorbell index to set nbio doorbell range. 
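A standalone sketch of the failure mode this fixes, assuming the 16-entry range size from the earlier nbio patch and an example vcn_ring0_1 slot: if the range is anchored at one ring's doorbell index rather than at the block base (vcn_ring0_1 << 1), lower doorbells in the same block fall outside the window.

#include <stdint.h>
#include <stdio.h>

/* Does doorbell idx fall inside a window of `size` entries at `base`? */
static int in_range(uint32_t idx, uint32_t base, uint32_t size)
{
	return idx >= base && idx < base + size;
}

int main(void)
{
	uint32_t vcn_ring0_1 = 0x100; /* assumed example slot */
	uint32_t block_base = vcn_ring0_1 << 1; /* correct range base */
	uint32_t dec_doorbell = block_base + 5; /* one ring's own index */
	uint32_t size = 0x10; /* 16 doorbells per partition */

	printf("base=block base: doorbell 0x%x in range? %d\n",
	       (unsigned int)dec_doorbell,
	       in_range(dec_doorbell, block_base, size));
	printf("base=ring index: doorbell 0x%x in range? %d\n",
	       (unsigned int)(block_base + 1),
	       in_range(block_base + 1, dec_doorbell, size));
	return 0;
}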
Signed-off-by: James Zhu Reviewed-by: Sonny Jiang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 4418c9f05ec4..ae2cc47d344a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -176,7 +176,7 @@ static int vcn_v4_0_3_hw_init(void *handle) int r; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ring->doorbell_index, ring->me); + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), ring->me); if (ring->use_doorbell) WREG32_SOC15(VCN, ring->me, regVCN_RB4_DB_CTRL, ring->doorbell_index << VCN_RB4_DB_CTRL__OFFSET__SHIFT | From 89cf4549a949b4ba3ce771163b75285979c95602 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 23 May 2022 13:53:45 +0800 Subject: [PATCH 266/861] drm/amdgpu: support gc v9_4_3 ring_test running on all xcc Each xcc has its own scratch_reg offset. Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index ad3e8cefbdb2..4ef39837e4c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -227,20 +227,23 @@ static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) uint32_t tmp = 0; unsigned i; int r; + /* scratch_reg0_offset is 32bit even with full XCD config */ + uint32_t scratch_reg0_offset; + + scratch_reg0_offset = SOC15_REG_OFFSET(GC, ring->xcc_id, regSCRATCH_REG0); + WREG32(scratch_reg0_offset, 0xCAFEDEAD); - WREG32_SOC15(GC, 0, regSCRATCH_REG0, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) return r; amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0) - - PACKET3_SET_UCONFIG_REG_START); + amdgpu_ring_write(ring, scratch_reg0_offset - PACKET3_SET_UCONFIG_REG_START); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_SOC15(GC, 0, regSCRATCH_REG0); + tmp = RREG32(scratch_reg0_offset); if (tmp == 0xDEADBEEF) break; udelay(1); From 147862d00bcf7e23e0a125f910f5db224f7b6722 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Fri, 3 Jun 2022 11:08:12 +0800 Subject: [PATCH 267/861] drm/amdgpu: enable the ring and IB test for slave kcq With the mec FW update to utilize the mqd base set by driver for kcq mapping, slave kcq ring test and IB test can be re-enabled.
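The ring bookkeeping behind this can be pictured with a short standalone sketch (the counts are example values only): compute ring i of partition xcc_id lives at flat index i + xcc_id * num_compute_rings in the shared compute_ring array, which is exactly the index j the kiq_map_queues() hunk below switches to.

#include <stdio.h>

int main(void)
{
	int num_compute_rings = 8; /* example per-XCC queue count */
	int num_xcc = 4; /* example partition count */
	int xcc_id, i;

	/* Flatten (partition, ring) pairs into the shared ring array */
	for (xcc_id = 0; xcc_id < num_xcc; ++xcc_id)
		for (i = 0; i < num_compute_rings; ++i)
			printf("xcc %d ring %d -> compute_ring[%d]\n",
			       xcc_id, i, i + xcc_id * num_compute_rings);
	return 0;
}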
Signed-off-by: Shiwu Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 61 ++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5 -- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 12 ++--- 3 files changed, 33 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 4a4d71ff9b95..682c157f2d8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -449,8 +449,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.mec.mqd_backup[j]) + adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL); + if (!adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings]) dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); } } @@ -502,22 +502,20 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) return -EINVAL; spin_lock(&kiq->ring_lock); - if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * - adev->gfx.num_compute_rings)) { - spin_unlock(&kiq->ring_lock); - return -ENOMEM; - } - - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - j = i + xcc_id * adev->gfx.num_compute_rings; - kiq->pmf->kiq_unmap_queues(kiq_ring, - &adev->gfx.compute_ring[i], - RESET_QUEUES, 0, 0); - } + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * + adev->gfx.num_compute_rings)) { + spin_unlock(&kiq->ring_lock); + return -ENOMEM; } - if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; + kiq->pmf->kiq_unmap_queues(kiq_ring, + &adev->gfx.compute_ring[i], + RESET_QUEUES, 0, 0); + } + + if (kiq_ring->sched.ready && !adev->job_hang) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); @@ -598,26 +596,23 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); spin_lock(&kiq->ring_lock); - /* No need to map kcq on the slave */ - if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { - r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * - adev->gfx.num_compute_rings + - kiq->pmf->set_resources_size); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - spin_unlock(&kiq->ring_lock); - return r; - } + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * + adev->gfx.num_compute_rings + + kiq->pmf->set_resources_size); + if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + spin_unlock(&kiq->ring_lock); + return r; + } - if (adev->enable_mes) - queue_mask = ~0ULL; + if (adev->enable_mes) + queue_mask = ~0ULL; - kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - j = i + xcc_id * adev->gfx.num_compute_rings; + kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; kiq->pmf->kiq_map_queues(kiq_ring, - &adev->gfx.compute_ring[i]); - } + &adev->gfx.compute_ring[j]); } r = amdgpu_ring_test_helper(kiq_ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index c955c3f060cd..b27ac48ca123 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -433,11 +433,6 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) else tmo = tmo_gfx; - /* skip ib test on the slave kcq */ - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE && - !amdgpu_gfx_is_master_xcc(adev, ring->xcc_id)) - continue; - r = amdgpu_ring_test_ib(ring, tmo); if (!r) { DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 4ef39837e4c7..ec273a217666 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1956,13 +1956,11 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) if (r) return r; - /* skip ring test on slave kcq */ - if (amdgpu_gfx_is_master_xcc(adev, i)) { - for (j = 0; j < adev->gfx.num_compute_rings; j++) { - ring = &adev->gfx.compute_ring[j + - i * adev->gfx.num_compute_rings]; - amdgpu_ring_test_helper(ring); - } + for (j = 0; j < adev->gfx.num_compute_rings; j++) { + ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings]; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; } gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i); From 15091a6f4380a0c1a7202d52e82cdaaf80e2bb70 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 19 Nov 2021 18:03:34 +0800 Subject: [PATCH 268/861] drm/amdgpu: add node_id to physical id conversion in EOP handler A new field nodeid in interrupt cookie indicates the node ID. Signed-off-by: Le Ma Reviewed-by: Shiwu Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 14 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 6 ++++-- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index fafebec5b7b6..031610c1340a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -99,6 +99,17 @@ const char *soc15_ih_clientid_name[] = { "MP1" }; +const int node_id_to_phys_map[NODEID_MAX] = { + [XCD0_NODEID] = 0, + [XCD1_NODEID] = 1, + [XCD2_NODEID] = 2, + [XCD3_NODEID] = 3, + [XCD4_NODEID] = 4, + [XCD5_NODEID] = 5, + [XCD6_NODEID] = 6, + [XCD7_NODEID] = 7, +}; + /** * amdgpu_irq_disable_all - disable *all* interrupts * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 1c747ac4129a..efe8a278cbdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -102,6 +102,20 @@ struct amdgpu_irq { bool retry_cam_enabled; }; +enum interrupt_node_id_per_xcp { + XCD0_NODEID = 1, + XCD1_NODEID = 2, + XCD2_NODEID = 5, + XCD3_NODEID = 6, + XCD4_NODEID = 9, + XCD5_NODEID = 10, + XCD6_NODEID = 13, + XCD7_NODEID = 14, + NODEID_MAX, +}; + +extern const int node_id_to_phys_map[NODEID_MAX]; + void amdgpu_irq_disable_all(struct amdgpu_device *adev); int amdgpu_irq_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index ec273a217666..ce64c4fc5f1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2799,7 +2799,7 @@ static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - int i; + int i, phys_id; u8 me_id, pipe_id, queue_id; struct amdgpu_ring *ring; @@ -2808,12 +2808,14 @@ static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, pipe_id 
= (entry->ring_id & 0x03) >> 0; queue_id = (entry->ring_id & 0x70) >> 4; + phys_id = node_id_to_phys_map[entry->node_id]; + switch (me_id) { case 0: case 1: case 2: for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; + ring = &adev->gfx.compute_ring[i + phys_id * adev->gfx.num_compute_rings]; /* Per-queue interrupt is supported for MEC starting from VI. * The interrupt can only be enabled/disabled per pipe instead of per queue. */ From e6a02e2cc7fe3fec05eeaf08855e57d616a037e1 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 18 Feb 2022 15:04:35 +0530 Subject: [PATCH 269/861] drm/amdgpu: Add some XCC programming Add additional XCC programming sequences. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index ce64c4fc5f1a..232feb387a40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1150,6 +1150,29 @@ static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) WREG32_SOC15(GC, xcc_id, regCPC_PSP_DEBUG, data); } +static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) +{ + uint32_t tmp = 0; + + switch (adev->gfx.num_xcd) { + /* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */ + case 1: + WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, 0x8); + break; + case 2: + tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID); + tmp = tmp | (adev->gfx.num_xcd << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP)); + WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, tmp); + + tmp = xcc_id << REG_FIELD_SHIFT(CP_PSP_XCP_CTL, PHYSICAL_XCC_ID); + tmp = tmp | (xcc_id << REG_FIELD_SHIFT(CP_PSP_XCP_CTL, XCC_DIE_ID)); + WREG32_SOC15(GC, xcc_id, regCP_PSP_XCP_CTL, tmp); + break; + default: + break; + } +} + static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) { uint32_t rlc_setting; @@ -1948,6 +1971,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) return r; } + /* set the virtual and physical id based on partition_mode */ + gfx_v9_4_3_program_xcc_id(adev, i); + r = gfx_v9_4_3_kiq_resume(adev, i); if (r) return r; From 5cf1675591dd28afc498348757469a87c1e9fcf2 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 28 Feb 2022 12:25:15 +0530 Subject: [PATCH 270/861] drm/amdgpu: Add mode2 reset logic for v13.0.6 Mode2 reset for v13.0.6 has similar workflow as v13.0.2 Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 6437ead87e5f..eec41ad30406 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -40,6 +40,7 @@ int amdgpu_reset_init(struct amdgpu_device *adev) switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 6): ret = aldebaran_reset_init(adev); break; case IP_VERSION(11, 0, 7): @@ -61,6 +62,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev) switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 6): ret = aldebaran_reset_fini(adev); break; case IP_VERSION(11, 0, 7): From 8dc1db3172ae2f17ae71e33b608a33411ce8a1aa Mon Sep 17 00:00:00 2001 From: Mukul 
Joshi Date: Wed, 14 Sep 2022 16:39:48 +0800 Subject: [PATCH 271/861] drm/amdkfd: Introduce kfd_node struct (v5) Introduce a new structure, kfd_node, which will now represent a compute node. kfd_node is carved out of the kfd_dev structure. The kfd_dev struct will now become the parent of kfd_node, and will store common resources such as doorbells, the GTT sub-allocator, etc. The kfd_node struct will store all resources specific to a compute node, such as the device queue manager, interrupt handling, etc. This is the first step in adding compute partition support in KFD. v2: introduce kfd_node struct to gc v11 (Hawking) v3: make reference to kfd_dev struct through kfd_node (Morris) v4: use kfd_node instead for kfd isr/mqd functions (Morris) v5: rebase (Alex) Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Hawking Zhang Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1 + .../gpu/drm/amd/amdkfd/cik_event_interrupt.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 43 +-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 28 +- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 260 +++++++++++------- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 100 +++---- .../drm/amd/amdkfd/kfd_device_queue_manager.h | 4 +- .../amd/amdkfd/kfd_device_queue_manager_v9.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 12 +- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 12 +- .../gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 6 +- .../gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 10 +- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 64 ++--- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 22 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 18 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 8 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 6 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 8 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 18 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 24 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 10 +- .../gpu/drm/amd/amdkfd/kfd_packet_manager.c | 2 +- .../drm/amd/amdkfd/kfd_packet_manager_v9.c | 8 +- .../drm/amd/amdkfd/kfd_packet_manager_vi.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 168 ++++++----- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 54 ++-- .../amd/amdkfd/kfd_process_queue_manager.c | 20 +- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 40 +-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 56 ++-- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 8 +- 38 files changed, 574 insertions(+), 496 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index de6ba0d4b860..af37f2ef4438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -35,6 +35,7 @@ #include "amdgpu_dma_buf.h" #include #include "amdgpu_xgmi.h" +#include "kfd_priv.h" #include "kfd_smi_events.h" #include diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 5c8023cba196..4ebfff6b6c55 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ 
b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -26,7 +26,7 @@ #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" -static bool cik_event_interrupt_isr(struct kfd_dev *dev, +static bool cik_event_interrupt_isr(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -85,7 +85,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); } -static void cik_event_interrupt_wq(struct kfd_dev *dev, +static void cik_event_interrupt_wq(struct kfd_node *dev, const uint32_t *ih_ring_entry) { const struct cik_ih_ring_entry *ihre = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 81d07ecf666d..eb0b0b38f10e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -293,7 +293,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_create_queue_args *args = data; - struct kfd_dev *dev; + struct kfd_node *dev; int err = 0; unsigned int queue_id; struct kfd_process_device *pdd; @@ -328,7 +328,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(dev->kfd, &pdd->doorbell_index) < 0) { err = -ENOMEM; goto err_alloc_doorbells; } @@ -336,7 +336,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work * on unmapped queues for usermode queue oversubscription (no aggregated doorbell) */ - if (dev->shared_resources.enable_mes && + if (dev->kfd->shared_resources.enable_mes && ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) { struct amdgpu_bo_va_mapping *wptr_mapping; @@ -887,7 +887,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, { struct kfd_ioctl_set_scratch_backing_va_args *args = data; struct kfd_process_device *pdd; - struct kfd_dev *dev; + struct kfd_node *dev; long err; mutex_lock(&p->mutex); @@ -1006,18 +1006,18 @@ err_drm_file: return ret; } -bool kfd_dev_is_large_bar(struct kfd_dev *dev) +bool kfd_dev_is_large_bar(struct kfd_node *dev) { if (debug_largebar) { pr_debug("Simulate large-bar allocation on non large-bar machine\n"); return true; } - if (dev->use_iommu_v2) + if (dev->kfd->use_iommu_v2) return false; - if (dev->local_mem_info.local_mem_size_private == 0 && - dev->local_mem_info.local_mem_size_public > 0) + if (dev->kfd->local_mem_info.local_mem_size_private == 0 && + dev->kfd->local_mem_info.local_mem_size_public > 0) return true; return false; } @@ -1041,7 +1041,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; struct kfd_process_device *pdd; void *mem; - struct kfd_dev *dev; + struct kfd_node *dev; int idr_handle; long err; uint64_t offset = args->mmap_offset; @@ -1105,7 +1105,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, } if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { - if (args->size != kfd_doorbell_process_slice(dev)) { + if (args->size != kfd_doorbell_process_slice(dev->kfd)) { err = -EINVAL; goto err_unlock; } @@ -1231,7 +1231,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, struct kfd_ioctl_map_memory_to_gpu_args *args = data; struct kfd_process_device *pdd, *peer_pdd; void *mem; - struct kfd_dev *dev; + 
struct kfd_node *dev; long err = 0; int i; uint32_t *devices_arr = NULL; @@ -1405,7 +1405,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, args->n_success = i+1; } - flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev); + flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd); if (flush_tlb) { err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev, (struct kgd_mem *) mem, true); @@ -1445,7 +1445,7 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep, int retval; struct kfd_ioctl_alloc_queue_gws_args *args = data; struct queue *q; - struct kfd_dev *dev; + struct kfd_node *dev; mutex_lock(&p->mutex); q = pqm_get_user_queue(&p->pqm, args->queue_id); @@ -1482,7 +1482,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_get_dmabuf_info_args *args = data; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct amdgpu_device *dmabuf_adev; void *metadata_buffer = NULL; uint32_t flags; @@ -1596,7 +1596,7 @@ static int kfd_ioctl_export_dmabuf(struct file *filep, struct kfd_ioctl_export_dmabuf_args *args = data; struct kfd_process_device *pdd; struct dma_buf *dmabuf; - struct kfd_dev *dev; + struct kfd_node *dev; void *mem; int ret = 0; @@ -2178,7 +2178,7 @@ static int criu_restore_devices(struct kfd_process *p, } for (i = 0; i < args->num_devices; i++) { - struct kfd_dev *dev; + struct kfd_node *dev; struct kfd_process_device *pdd; struct file *drm_file; @@ -2240,7 +2240,7 @@ static int criu_restore_devices(struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) { ret = -ENOMEM; goto exit; } @@ -2268,7 +2268,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, u64 offset; if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { - if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev)) + if (bo_bucket->size != + kfd_doorbell_process_slice(pdd->dev->kfd)) return -EINVAL; offset = kfd_get_process_doorbells(pdd); @@ -2350,7 +2351,7 @@ static int criu_restore_bo(struct kfd_process *p, /* now map these BOs to GPU/s */ for (j = 0; j < p->n_pdds; j++) { - struct kfd_dev *peer; + struct kfd_node *peer; struct kfd_process_device *peer_pdd; if (!bo_priv->mapped_gpuids[j]) @@ -2947,7 +2948,7 @@ err_i1: return retcode; } -static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, +static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { phys_addr_t address; @@ -2981,7 +2982,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) { struct kfd_process *process; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; unsigned long mmap_offset; unsigned int gpu_id; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 475e47027354..f5aebba31e88 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1405,7 +1405,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, return i; } -int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info) +int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) { int num_of_cache_types = 0; @@ -1524,7 +1524,7 @@ int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pca case 
IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): num_of_cache_types = - kfd_fill_gpu_cache_info_from_gfx_config(kdev, *pcache_info); + kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info); break; default: *pcache_info = dummy_cache_info; @@ -1858,7 +1858,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) } static int kfd_fill_gpu_memory_affinity(int *avail_size, - struct kfd_dev *kdev, uint8_t type, uint64_t size, + struct kfd_node *kdev, uint8_t type, uint64_t size, struct crat_subtype_memory *sub_type_hdr, uint32_t proximity_domain, const struct kfd_local_mem_info *local_mem_info) @@ -1887,7 +1887,7 @@ static int kfd_fill_gpu_memory_affinity(int *avail_size, } #ifdef CONFIG_ACPI_NUMA -static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev) +static void kfd_find_numa_node_in_srat(struct kfd_node *kdev) { struct acpi_table_header *table_header = NULL; struct acpi_subtable_header *sub_header = NULL; @@ -1982,7 +1982,7 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev) * Return 0 if successful else return -ve value */ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, - struct kfd_dev *kdev, + struct kfd_node *kdev, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain) { @@ -2044,8 +2044,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, } static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, - struct kfd_dev *kdev, - struct kfd_dev *peer_kdev, + struct kfd_node *kdev, + struct kfd_node *peer_kdev, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain_from, uint32_t proximity_domain_to) @@ -2081,7 +2081,7 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, * [OUT] actual size of data filled in crat_image */ static int kfd_create_vcrat_image_gpu(void *pcrat_image, - size_t *size, struct kfd_dev *kdev, + size_t *size, struct kfd_node *kdev, uint32_t proximity_domain) { struct crat_header *crat_table = (struct crat_header *)pcrat_image; @@ -2153,7 +2153,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, /* Check if this node supports IOMMU. During parsing this flag will * translate to HSA_CAP_ATS_PRESENT */ - if (!kfd_iommu_check_device(kdev)) + if (!kfd_iommu_check_device(kdev->kfd)) cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT; crat_table->length += sub_type_hdr->length; @@ -2164,7 +2164,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. */ - local_mem_info = kdev->local_mem_info; + local_mem_info = kdev->kfd->local_mem_info; sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); @@ -2216,12 +2216,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * (from other GPU to this GPU) will be added * in kfd_parse_subtype_iolink. */ - if (kdev->hive_id) { + if (kdev->kfd->hive_id) { for (nid = 0; nid < proximity_domain; ++nid) { peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid); if (!peer_dev->gpu) continue; - if (peer_dev->gpu->hive_id != kdev->hive_id) + if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) continue; sub_type_hdr = (typeof(sub_type_hdr))( (char *)sub_type_hdr + @@ -2255,12 +2255,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU * -- this option is not currently implemented. 
* The assumption is that all AMD APUs will have CRAT - * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU + * @kdev: Valid kfd_node required if flags contain COMPUTE_UNIT_GPU * * Return 0 if successful else return -ve value */ int kfd_create_crat_image_virtual(void **crat_image, size_t *size, - int flags, struct kfd_dev *kdev, + int flags, struct kfd_node *kdev, uint32_t proximity_domain) { void *pcrat_image = NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index 8d1e8ba58dee..3d0e533b93b9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -293,7 +293,7 @@ struct crat_subtype_generic { #pragma pack() -struct kfd_dev; +struct kfd_node; /* Static table to describe GPU Cache information */ struct kfd_gpu_cache_info { @@ -305,14 +305,14 @@ struct kfd_gpu_cache_info { */ uint32_t num_cu_shared; }; -int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info); +int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info); int kfd_create_crat_image_acpi(void **crat_image, size_t *size); void kfd_destroy_crat_image(void *crat_image); int kfd_parse_crat_table(void *crat_image, struct list_head *device_list, uint32_t proximity_domain); int kfd_create_crat_image_virtual(void **crat_image, size_t *size, - int flags, struct kfd_dev *kdev, + int flags, struct kfd_node *kdev, uint32_t proximity_domain); #endif /* KFD_CRAT_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c index ad5a40a685ac..4a5a0a4e00f2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c @@ -43,7 +43,7 @@ static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data) static ssize_t kfd_debugfs_hang_hws_write(struct file *file, const char __user *user_buf, size_t size, loff_t *ppos) { - struct kfd_dev *dev; + struct kfd_node *dev; char tmp[16]; uint32_t gpu_id; int ret = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1510041a6ee1..23d9a7f77055 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -61,7 +61,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume_iommu(struct kfd_dev *kfd); -static int kfd_resume(struct kfd_dev *kfd); +static int kfd_resume(struct kfd_node *kfd); static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) { @@ -441,8 +441,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) memset(&kfd->doorbell_available_index, 0, sizeof(kfd->doorbell_available_index)); - atomic_set(&kfd->sram_ecc_flag, 0); - ida_init(&kfd->doorbell_ida); return kfd; @@ -489,41 +487,106 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) } } -static int kfd_gws_init(struct kfd_dev *kfd) +static int kfd_gws_init(struct kfd_node *node) { int ret = 0; + struct kfd_dev *kfd = node->kfd; - if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) + if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) return 0; - if (hws_gws_support || (KFD_IS_SOC15(kfd) && - ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) + if (hws_gws_support || (KFD_IS_SOC15(node) && + ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1) && kfd->mec2_fw_version >= 0x81b3) || - (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) + (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0) && 
kfd->mec2_fw_version >= 0x1b3) || - (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) + (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1) && kfd->mec2_fw_version >= 0x30) || - (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) + (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2) && kfd->mec2_fw_version >= 0x28) || - (KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0) - && KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0) + (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) + && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) && kfd->mec2_fw_version >= 0x6b)))) - ret = amdgpu_amdkfd_alloc_gws(kfd->adev, - kfd->adev->gds.gws_size, &kfd->gws); + ret = amdgpu_amdkfd_alloc_gws(node->adev, + node->adev->gds.gws_size, &node->gws); return ret; } -static void kfd_smi_init(struct kfd_dev *dev) +static void kfd_smi_init(struct kfd_node *dev) { INIT_LIST_HEAD(&dev->smi_clients); spin_lock_init(&dev->smi_lock); } +static int kfd_init_node(struct kfd_node *node) +{ + int err = -1; + + if (kfd_interrupt_init(node)) { + dev_err(kfd_device, "Error initializing interrupts\n"); + goto kfd_interrupt_error; + } + + node->dqm = device_queue_manager_init(node); + if (!node->dqm) { + dev_err(kfd_device, "Error initializing queue manager\n"); + goto device_queue_manager_error; + } + + if (kfd_gws_init(node)) { + dev_err(kfd_device, "Could not allocate %d gws\n", + node->adev->gds.gws_size); + goto gws_error; + } + + if (kfd_resume(node)) + goto kfd_resume_error; + + if (kfd_topology_add_device(node)) { + dev_err(kfd_device, "Error adding device to topology\n"); + goto kfd_topology_add_device_error; + } + + kfd_smi_init(node); + + return 0; + +kfd_topology_add_device_error: +kfd_resume_error: +gws_error: + device_queue_manager_uninit(node->dqm); +device_queue_manager_error: + kfd_interrupt_exit(node); +kfd_interrupt_error: + if (node->gws) + amdgpu_amdkfd_free_gws(node->adev, node->gws); + + /* Cleanup the node memory here */ + kfree(node); + return err; +} + +static void kfd_cleanup_node(struct kfd_dev *kfd) +{ + struct kfd_node *knode = kfd->node; + + device_queue_manager_uninit(knode->dqm); + kfd_interrupt_exit(knode); + kfd_topology_remove_device(knode); + if (knode->gws) + amdgpu_amdkfd_free_gws(knode->adev, knode->gws); + kfree(knode); + kfd->node = NULL; +} + bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources) { unsigned int size, map_process_packet_size; + struct kfd_node *node; + uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; + unsigned int max_proc_per_quantum; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -533,10 +596,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; - kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; - kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd - - kfd->vm_info.first_vmid_kfd + 1; + first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; + last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; + vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 
* 32 and 64-bit requests are possible and must be @@ -557,9 +619,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, /* Verify module parameters regarding mapped process number*/ if (hws_max_conc_proc >= 0) - kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); + max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd); else - kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; + max_proc_per_quantum = vmid_num_kfd; /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * @@ -609,26 +671,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->noretry = kfd->adev->gmc.noretry; - if (kfd_interrupt_init(kfd)) { - dev_err(kfd_device, "Error initializing interrupts\n"); - goto kfd_interrupt_error; - } - - kfd->dqm = device_queue_manager_init(kfd); - if (!kfd->dqm) { - dev_err(kfd_device, "Error initializing queue manager\n"); - goto device_queue_manager_error; - } - - /* If supported on this device, allocate global GWS that is shared - * by all KFD processes - */ - if (kfd_gws_init(kfd)) { - dev_err(kfd_device, "Could not allocate %d gws\n", - kfd->adev->gds.gws_size); - goto gws_error; - } - /* If CRAT is broken, won't set iommu enabled */ kfd_double_confirm_iommu_support(kfd); @@ -642,46 +684,54 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_migrate_init(kfd->adev); - if (kfd_resume_iommu(kfd)) - goto device_iommu_error; - - if (kfd_resume(kfd)) - goto kfd_resume_error; - - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); - - if (kfd_topology_add_device(kfd)) { - dev_err(kfd_device, "Error adding device to topology\n"); - goto kfd_topology_add_device_error; + /* Allocate the KFD node */ + node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); + if (!node) { + dev_err(kfd_device, "Error allocating KFD node\n"); + goto node_alloc_error; } - kfd_smi_init(kfd); + node->adev = kfd->adev; + node->kfd = kfd; + node->kfd2kgd = kfd->kfd2kgd; + node->vm_info.vmid_num_kfd = vmid_num_kfd; + node->vm_info.first_vmid_kfd = first_vmid_kfd; + node->vm_info.last_vmid_kfd = last_vmid_kfd; + node->max_proc_per_quantum = max_proc_per_quantum; + atomic_set(&node->sram_ecc_flag, 0); + + /* Initialize the KFD node */ + if (kfd_init_node(node)) { + dev_err(kfd_device, "Error initializing KFD node\n"); + goto node_init_error; + } + kfd->node = node; + + if (kfd_resume_iommu(kfd)) + goto kfd_resume_iommu_error; + + amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); kfd->init_complete = true; dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); pr_debug("Starting kfd with the following scheduling policy %d\n", - kfd->dqm->sched_policy); + node->dqm->sched_policy); goto out; -kfd_topology_add_device_error: -kfd_resume_error: +kfd_resume_iommu_error: + kfd_cleanup_node(kfd); +node_init_error: +node_alloc_error: device_iommu_error: -gws_error: - device_queue_manager_uninit(kfd->dqm); -device_queue_manager_error: - kfd_interrupt_exit(kfd); -kfd_interrupt_error: kfd_doorbell_fini(kfd); kfd_doorbell_error: kfd_gtt_sa_fini(kfd); kfd_gtt_sa_init_error: amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); alloc_gtt_mem_failure: - if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); dev_err(kfd_device, "device %x:%x NOT added due to errors\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); @@ -692,15 +742,11 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - device_queue_manager_uninit(kfd->dqm); - kfd_interrupt_exit(kfd); - 
kfd_topology_remove_device(kfd); + kfd_cleanup_node(kfd); kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); - if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); } kfree(kfd); @@ -708,16 +754,18 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) int kgd2kfd_pre_reset(struct kfd_dev *kfd) { + struct kfd_node *node = kfd->node; + if (!kfd->init_complete) return 0; - kfd_smi_event_update_gpu_reset(kfd, false); + kfd_smi_event_update_gpu_reset(node, false); - kfd->dqm->ops.pre_reset(kfd->dqm); + node->dqm->ops.pre_reset(node->dqm); kgd2kfd_suspend(kfd, false); - kfd_signal_reset_event(kfd); + kfd_signal_reset_event(node); return 0; } @@ -730,18 +778,19 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { int ret; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return 0; - ret = kfd_resume(kfd); + ret = kfd_resume(node); if (ret) return ret; atomic_dec(&kfd_locked); - atomic_set(&kfd->sram_ecc_flag, 0); + atomic_set(&node->sram_ecc_flag, 0); - kfd_smi_event_update_gpu_reset(kfd, true); + kfd_smi_event_update_gpu_reset(node, true); return 0; } @@ -753,6 +802,8 @@ bool kfd_is_locked(void) void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { + struct kfd_node *node = kfd->node; + if (!kfd->init_complete) return; @@ -763,18 +814,19 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kfd_suspend_all_processes(); } - kfd->dqm->ops.stop(kfd->dqm); + node->dqm->ops.stop(node->dqm); kfd_iommu_suspend(kfd); } int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { int ret, count; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return 0; - ret = kfd_resume(kfd); + ret = kfd_resume(node); if (ret) return ret; @@ -809,15 +861,15 @@ static int kfd_resume_iommu(struct kfd_dev *kfd) return err; } -static int kfd_resume(struct kfd_dev *kfd) +static int kfd_resume(struct kfd_node *node) { int err = 0; - err = kfd->dqm->ops.start(kfd->dqm); + err = node->dqm->ops.start(node->dqm); if (err) dev_err(kfd_device, "Error starting queue manager for device %x:%x\n", - kfd->adev->pdev->vendor, kfd->adev->pdev->device); + node->adev->pdev->vendor, node->adev->pdev->device); return err; } @@ -843,6 +895,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; bool is_patched = false; unsigned long flags; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return; @@ -852,16 +905,16 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) return; } - spin_lock_irqsave(&kfd->interrupt_lock, flags); + spin_lock_irqsave(&node->interrupt_lock, flags); - if (kfd->interrupts_active - && interrupt_is_wanted(kfd, ih_ring_entry, + if (node->interrupts_active + && interrupt_is_wanted(node, ih_ring_entry, patched_ihre, &is_patched) - && enqueue_ih_ring_entry(kfd, + && enqueue_ih_ring_entry(node, is_patched ? 
patched_ihre : ih_ring_entry)) - kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work); + kfd_queue_work(node->ih_wq, &node->interrupt_work); - spin_unlock_irqrestore(&kfd->interrupt_lock, flags); + spin_unlock_irqrestore(&node->interrupt_lock, flags); } int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger) @@ -999,10 +1052,11 @@ static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr, return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size); } -int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, +int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, struct kfd_mem_obj **mem_obj) { unsigned int found, start_search, cur_size; + struct kfd_dev *kfd = node->kfd; if (size == 0) return -EINVAL; @@ -1102,8 +1156,10 @@ kfd_gtt_no_free_chunk: return -ENOMEM; } -int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) +int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj) { + struct kfd_dev *kfd = node->kfd; + /* Act like kfree when trying to free a NULL object */ if (!mem_obj) return 0; @@ -1126,28 +1182,28 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) { if (kfd) - atomic_inc(&kfd->sram_ecc_flag); + atomic_inc(&kfd->node->sram_ecc_flag); } -void kfd_inc_compute_active(struct kfd_dev *kfd) +void kfd_inc_compute_active(struct kfd_node *node) { - if (atomic_inc_return(&kfd->compute_profile) == 1) - amdgpu_amdkfd_set_compute_idle(kfd->adev, false); + if (atomic_inc_return(&node->kfd->compute_profile) == 1) + amdgpu_amdkfd_set_compute_idle(node->adev, false); } -void kfd_dec_compute_active(struct kfd_dev *kfd) +void kfd_dec_compute_active(struct kfd_node *node) { - int count = atomic_dec_return(&kfd->compute_profile); + int count = atomic_dec_return(&node->kfd->compute_profile); if (count == 0) - amdgpu_amdkfd_set_compute_idle(kfd->adev, true); + amdgpu_amdkfd_set_compute_idle(node->adev, true); WARN_ONCE(count < 0, "Compute profile ref. count error"); } void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { if (kfd && kfd->init_complete) - kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); + kfd_smi_event_update_thermal_throttling(kfd->node, throttle_bitmask); } /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and @@ -1155,19 +1211,19 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) * When the device has more than two engines, we reserve two for PCIe to enable * full-duplex and the rest are used as XGMI. 
*/ -unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) +unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) { /* If XGMI is not supported, all SDMA engines are PCIe */ - if (!kdev->adev->gmc.xgmi.supported) - return kdev->adev->sdma.num_instances; + if (!node->adev->gmc.xgmi.supported) + return node->adev->sdma.num_instances; - return min(kdev->adev->sdma.num_instances, 2); + return min(node->adev->sdma.num_instances, 2); } -unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) { /* After reserved for PCIe, the rest of engines are XGMI */ - return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); + return node->adev->sdma.num_instances - kfd_get_num_sdma_engines(node); } #if defined(CONFIG_DEBUG_FS) @@ -1175,7 +1231,7 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) /* This function will send a package to HIQ to hang the HWS * which will trigger a GPU reset and bring the HWS back to normal state */ -int kfd_debugfs_hang_hws(struct kfd_dev *dev) +int kfd_debugfs_hang_hws(struct kfd_node *dev) { if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { pr_err("HWS is not enabled"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 7a95698d83f7..34977d89f01c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -74,31 +74,31 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) { int i; - int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec - + pipe) * dqm->dev->shared_resources.num_queue_per_pipe; + int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec + + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe; /* queue is available for KFD usage if bit is 1 */ - for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i) + for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i) if (test_bit(pipe_offset + i, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) return true; return false; } unsigned int get_cp_queues_num(struct device_queue_manager *dqm) { - return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap, + return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap, KGD_MAX_QUEUES); } unsigned int get_queues_per_pipe(struct device_queue_manager *dqm) { - return dqm->dev->shared_resources.num_queue_per_pipe; + return dqm->dev->kfd->shared_resources.num_queue_per_pipe; } unsigned int get_pipes_per_mec(struct device_queue_manager *dqm) { - return dqm->dev->shared_resources.num_pipe_per_mec; + return dqm->dev->kfd->shared_resources.num_pipe_per_mec; } static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) @@ -110,18 +110,18 @@ static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_sdma_engines(dqm->dev) * - dqm->dev->device_info.num_sdma_queues_per_engine; + dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_xgmi_sdma_engines(dqm->dev) * - dqm->dev->device_info.num_sdma_queues_per_engine; + dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } static 
inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm) { - return dqm->dev->device_info.reserved_sdma_queues_bitmap; + return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; } void program_sh_mem_settings(struct device_queue_manager *dqm, @@ -330,7 +330,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q, uint32_t const *restore_id) { - struct kfd_dev *dev = qpd->dqm->dev; + struct kfd_node *dev = qpd->dqm->dev; if (!KFD_IS_SOC15(dev)) { /* On pre-SOC15 chips we need to use the queue ID to @@ -349,7 +349,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, * for a SDMA engine is 512. */ - uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx; + uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx; uint32_t valid_id = idx_offset[q->properties.sdma_engine_id] + (q->properties.sdma_queue_id & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET @@ -382,7 +382,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, } q->properties.doorbell_off = - kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd), + kfd_get_doorbell_dw_offset_in_bar(dev->kfd, qpd_to_pdd(qpd), q->doorbell_id); return 0; } @@ -391,7 +391,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { unsigned int old; - struct kfd_dev *dev = qpd->dqm->dev; + struct kfd_node *dev = qpd->dqm->dev; if (!KFD_IS_SOC15(dev) || q->properties.type == KFD_QUEUE_TYPE_SDMA || @@ -441,7 +441,7 @@ static int allocate_vmid(struct device_queue_manager *dqm, program_sh_mem_settings(dqm, qpd); - if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled) + if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled) program_trap_handler_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() @@ -460,7 +460,7 @@ static int allocate_vmid(struct device_queue_manager *dqm, return 0; } -static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, +static int flush_texture_cache_nocpsch(struct kfd_node *kdev, struct qcm_process_device *qpd) { const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf; @@ -661,7 +661,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm, #define SQ_IND_CMD_CMD_KILL 0x00000003 #define SQ_IND_CMD_MODE_BROADCAST 0x00000001 -static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) +static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p) { int status = 0; unsigned int vmid; @@ -837,7 +837,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, /* Make sure the queue is unmapped before updating the MQD */ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false); else if (prev_active) @@ -858,7 +858,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, } retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, - (dqm->dev->cwsr_enabled ? + (dqm->dev->kfd->cwsr_enabled ? 
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -895,7 +895,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, } if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = map_queues_cpsch(dqm); else if (q->properties.is_active) retval = add_queue_mes(dqm, q, &pdd->qpd); @@ -951,7 +951,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, continue; retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, - (dqm->dev->cwsr_enabled ? + (dqm->dev->kfd->cwsr_enabled ? KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -993,7 +993,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, q->properties.is_active = false; decrement_queue_count(dqm, qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = remove_queue_mes(dqm, q, qpd); if (retval) { pr_err("Failed to evict queue %d\n", @@ -1003,7 +1003,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, } } pdd->last_evict_timestamp = get_jiffies_64(); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, qpd->is_debug ? KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : @@ -1132,7 +1132,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, q->properties.is_active = true; increment_queue_count(dqm, &pdd->qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = add_queue_mes(dqm, q, qpd); if (retval) { pr_err("Failed to restore queue %d\n", @@ -1141,7 +1141,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, } } } - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); qpd->evicted = 0; @@ -1282,7 +1282,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) if (test_bit(pipe_offset + queue, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) dqm->allocated_queues[pipe] |= 1 << queue; } @@ -1426,14 +1426,14 @@ static int set_sched_resources(struct device_queue_manager *dqm) int i, mec; struct scheduling_resources res; - res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap; + res.vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; res.queue_mask = 0; for (i = 0; i < KGD_MAX_QUEUES; ++i) { - mec = (i / dqm->dev->shared_resources.num_queue_per_pipe) - / dqm->dev->shared_resources.num_pipe_per_mec; + mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe) + / dqm->dev->kfd->shared_resources.num_pipe_per_mec; - if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap)) + if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; /* only acquire queues from the first MEC */ @@ -1489,7 +1489,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm_lock(dqm); - if (!dqm->dev->shared_resources.enable_mes) { + if (!dqm->dev->kfd->shared_resources.enable_mes) { retval = pm_init(&dqm->packet_mgr, dqm); if (retval) goto fail_packet_manager_init; @@ -1516,14 +1516,14 @@ static int start_cpsch(struct device_queue_manager *dqm) 
dqm->is_hws_hang = false; dqm->is_resetting = false; dqm->sched_running = true; - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); return 0; fail_allocate_vidmem: fail_set_sched_resources: - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr, false); fail_packet_manager_init: dqm_unlock(dqm); @@ -1541,7 +1541,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) } if (!dqm->is_hws_hang) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false); else remove_all_queues_mes(dqm); @@ -1550,11 +1550,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) hanging = dqm->is_hws_hang || dqm->is_resetting; dqm->sched_running = false; - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_release_ib(&dqm->packet_mgr); kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr, hanging); dqm_unlock(dqm); @@ -1673,7 +1673,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (q->properties.is_active) { increment_queue_count(dqm, qpd, q); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); else @@ -1893,7 +1893,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, list_del(&q->list); qpd->queue_count--; if (q->properties.is_active) { - if (!dqm->dev->shared_resources.enable_mes) { + if (!dqm->dev->kfd->shared_resources.enable_mes) { decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -2056,7 +2056,7 @@ static int get_wave_state(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || - q->properties.is_active || !q->device->cwsr_enabled || + q->properties.is_active || !q->device->kfd->cwsr_enabled || !mqd_mgr->get_wave_state) { dqm_unlock(dqm); return -EINVAL; @@ -2105,7 +2105,7 @@ static int checkpoint_mqd(struct device_queue_manager *dqm, dqm_lock(dqm); - if (q->properties.is_active || !q->device->cwsr_enabled) { + if (q->properties.is_active || !q->device->kfd->cwsr_enabled) { r = -EINVAL; goto dqm_unlock; } @@ -2158,7 +2158,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, if (q->properties.is_active) { decrement_queue_count(dqm, qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = remove_queue_mes(dqm, q, qpd); if (retval) pr_err("Failed to remove queue %d\n", @@ -2180,7 +2180,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, } } - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, filter, 0); if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { @@ -2242,11 +2242,11 @@ out_free: static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) { int retval; - struct kfd_dev *dev = dqm->dev; + struct kfd_node *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = 
dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * get_num_all_sdma_engines(dqm) * - dev->device_info.num_sdma_queues_per_engine + + dev->kfd->device_info.num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, @@ -2256,7 +2256,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) return retval; } -struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) +struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) { struct device_queue_manager *dqm; @@ -2373,7 +2373,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) if (init_mqd_managers(dqm)) goto out_free; - if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { + if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { pr_err("Failed to allocate hiq sdma mqd trunk buffer\n"); goto out_free; } @@ -2386,7 +2386,7 @@ out_free: return NULL; } -static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, +static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd) { WARN(!mqd, "No hiq sdma mqd trunk to free"); @@ -2397,7 +2397,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, void device_queue_manager_uninit(struct device_queue_manager *dqm) { dqm->ops.uninitialize(dqm); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd); kfree(dqm); } @@ -2479,7 +2479,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { if (!test_bit(pipe_offset + queue, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; r = dqm->dev->kfd2kgd->hqd_dump( @@ -2497,7 +2497,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { for (queue = 0; - queue < dqm->dev->device_info.num_sdma_queues_per_engine; + queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( dqm->dev->adev, pipe, queue, &dump, &n_regs); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index a537b9ef3e16..e554a48f3054 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -207,7 +207,7 @@ struct device_queue_manager_asic_ops { struct queue *q, struct qcm_process_device *qpd); struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); }; /** @@ -228,7 +228,7 @@ struct device_queue_manager { struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX]; struct packet_manager packet_mgr; - struct kfd_dev *dev; + struct kfd_node *dev; struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */ struct list_head queues; unsigned int saved_flags; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index 914d94679d73..8af643388768 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -60,10 +60,10 @@ static int update_qpd_v9(struct device_queue_manager *dqm, qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (dqm->dev->noretry && !dqm->dev->use_iommu_v2) + if (dqm->dev->kfd->noretry 
&& !dqm->dev->kfd->use_iommu_v2) qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; - if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3)) + if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3)) qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index 38c9e1ca6691..6421b620388d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -138,7 +138,7 @@ void kfd_doorbell_fini(struct kfd_dev *kfd) iounmap(kfd->doorbell_kernel_ptr); } -int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { phys_addr_t address; @@ -148,7 +148,7 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, * For simplicitly we only allow mapping of the entire doorbell * allocation of a single device & process. */ - if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev)) + if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev->kfd)) return -EINVAL; pdd = kfd_get_process_device_data(dev, process); @@ -170,13 +170,13 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, " vm_flags == 0x%04lX\n" " size == 0x%04lX\n", (unsigned long long) vma->vm_start, address, vma->vm_flags, - kfd_doorbell_process_slice(dev)); + kfd_doorbell_process_slice(dev->kfd)); return io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, - kfd_doorbell_process_slice(dev), + kfd_doorbell_process_slice(dev->kfd), vma->vm_page_prot); } @@ -278,14 +278,14 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd) phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd) { if (!pdd->doorbell_index) { - int r = kfd_alloc_process_doorbells(pdd->dev, + int r = kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index); if (r < 0) return 0; } - return pdd->dev->doorbell_base + - pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev); + return pdd->dev->kfd->doorbell_base + + pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev->kfd); } int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index c894cf8f7c50..9926186f88a6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -348,7 +348,7 @@ static int kfd_event_page_set(struct kfd_process *p, void *kernel_address, int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset) { - struct kfd_dev *kfd; + struct kfd_node *kfd; struct kfd_process_device *pdd; void *mem, *kern_addr; uint64_t size; @@ -1125,7 +1125,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, } #ifdef KFD_SUPPORT_IOMMU_V2 -void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid, +void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid, unsigned long address, bool is_write_requested, bool is_execute_requested) { @@ -1221,8 +1221,8 @@ void kfd_signal_hw_exception_event(u32 pasid) kfd_unref_process(p); } -void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, - struct kfd_vm_fault_info *info) +void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, + struct kfd_vm_fault_info *info) { struct kfd_event *ev; uint32_t id; @@ -1269,7 +1269,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, kfd_unref_process(p); } -void 
kfd_signal_reset_event(struct kfd_dev *dev) +void kfd_signal_reset_event(struct kfd_node *dev) { struct kfd_hsa_hw_exception_data hw_exception_data; struct kfd_hsa_memory_exception_data memory_exception_data; @@ -1325,7 +1325,7 @@ void kfd_signal_reset_event(struct kfd_dev *dev) srcu_read_unlock(&kfd_processes_srcu, idx); } -void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid) +void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid) { struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); struct kfd_hsa_memory_exception_data memory_exception_data; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 8aebe408c544..da2ca00d79e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -322,21 +322,21 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) pdd->lds_base = MAKE_LDS_APP_BASE_VI(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - if (!pdd->dev->use_iommu_v2) { + if (!pdd->dev->kfd->use_iommu_v2) { /* dGPUs: SVM aperture starting at 0 * with small reserved space for kernel. * Set them to CANONICAL addresses. */ pdd->gpuvm_base = SVM_USER_BASE; pdd->gpuvm_limit = - pdd->dev->shared_resources.gpuvm_size - 1; + pdd->dev->kfd->shared_resources.gpuvm_size - 1; } else { /* set them to non CANONICAL addresses, and no SVM is * allocated. */ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1); pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base, - pdd->dev->shared_resources.gpuvm_size); + pdd->dev->kfd->shared_resources.gpuvm_size); } pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI(); @@ -356,7 +356,7 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) */ pdd->gpuvm_base = SVM_USER_BASE; pdd->gpuvm_limit = - pdd->dev->shared_resources.gpuvm_size - 1; + pdd->dev->kfd->shared_resources.gpuvm_size - 1; pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); @@ -365,7 +365,7 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) int kfd_init_apertures(struct kfd_process *process) { uint8_t id = 0; - struct kfd_dev *dev; + struct kfd_node *dev; struct kfd_process_device *pdd; /*Iterating over all devices*/ @@ -417,7 +417,7 @@ int kfd_init_apertures(struct kfd_process *process) } } - if (!dev->use_iommu_v2) { + if (!dev->kfd->use_iommu_v2) { /* dGPUs: the reserved space for kernel * before SVM */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index 0d53f6067422..0f0fdea4cd8a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -187,7 +187,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1) REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID)); } -static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev, +static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, uint16_t pasid, uint16_t source_id) { int ret = -EINVAL; @@ -225,7 +225,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev, amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); } -static bool event_interrupt_isr_v11(struct kfd_dev *dev, +static bool event_interrupt_isr_v11(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -274,7 +274,7 @@ static bool 
event_interrupt_isr_v11(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); } -static void event_interrupt_wq_v11(struct kfd_dev *dev, +static void event_interrupt_wq_v11(struct kfd_node *dev, const uint32_t *ih_ring_entry) { uint16_t source_id, client_id, ring_id, pasid, vmid; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 0b75a37b689b..861bccb1e9dc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -90,7 +90,7 @@ enum SQ_INTERRUPT_ERROR_TYPE { #define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000 #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20 -static void event_interrupt_poison_consumption_v9(struct kfd_dev *dev, +static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { int old_poison, ret = -EINVAL; @@ -160,7 +160,7 @@ static bool context_id_expected(struct kfd_dev *dev) } } -static bool event_interrupt_isr_v9(struct kfd_dev *dev, +static bool event_interrupt_isr_v9(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -206,7 +206,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, *patched_flag = true; memcpy(patched_ihre, ih_ring_entry, - dev->device_info.ih_ring_entry_size); + dev->kfd->device_info.ih_ring_entry_size); pasid = dev->dqm->vmid_pasid[vmid]; @@ -235,7 +235,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, uint32_t context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); - if (context_id == 0 && context_id_expected(dev)) + if (context_id == 0 && context_id_expected(dev->kfd)) return false; } @@ -253,7 +253,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); } -static void event_interrupt_wq_v9(struct kfd_dev *dev, +static void event_interrupt_wq_v9(struct kfd_node *dev, const uint32_t *ih_ring_entry) { uint16_t source_id, client_id, pasid, vmid; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index 34772fe74296..dd3c43c1ad70 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -50,29 +50,29 @@ static void interrupt_wq(struct work_struct *); -int kfd_interrupt_init(struct kfd_dev *kfd) +int kfd_interrupt_init(struct kfd_node *node) { int r; - r = kfifo_alloc(&kfd->ih_fifo, - KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size, + r = kfifo_alloc(&node->ih_fifo, + KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size, GFP_KERNEL); if (r) { - dev_err(kfd->adev->dev, "Failed to allocate IH fifo\n"); + dev_err(node->adev->dev, "Failed to allocate IH fifo\n"); return r; } - kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); - if (unlikely(!kfd->ih_wq)) { - kfifo_free(&kfd->ih_fifo); - dev_err(kfd->adev->dev, "Failed to allocate KFD IH workqueue\n"); + node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); + if (unlikely(!node->ih_wq)) { + kfifo_free(&node->ih_fifo); + dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n"); return -ENOMEM; } - spin_lock_init(&kfd->interrupt_lock); + spin_lock_init(&node->interrupt_lock); - INIT_WORK(&kfd->interrupt_work, interrupt_wq); + INIT_WORK(&node->interrupt_work, interrupt_wq); - kfd->interrupts_active = true; + node->interrupts_active = true; /* * After this function returns, the interrupt will be enabled. 
This @@ -84,7 +84,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd) return 0; } -void kfd_interrupt_exit(struct kfd_dev *kfd) +void kfd_interrupt_exit(struct kfd_node *node) { /* * Stop the interrupt handler from writing to the ring and scheduling @@ -93,31 +93,31 @@ void kfd_interrupt_exit(struct kfd_dev *kfd) */ unsigned long flags; - spin_lock_irqsave(&kfd->interrupt_lock, flags); - kfd->interrupts_active = false; - spin_unlock_irqrestore(&kfd->interrupt_lock, flags); + spin_lock_irqsave(&node->interrupt_lock, flags); + node->interrupts_active = false; + spin_unlock_irqrestore(&node->interrupt_lock, flags); /* * flush_work ensures that there are no outstanding * work-queue items that will access interrupt_ring. New work items * can't be created because we stopped interrupt handling above. */ - flush_workqueue(kfd->ih_wq); + flush_workqueue(node->ih_wq); - kfifo_free(&kfd->ih_fifo); + kfifo_free(&node->ih_fifo); } /* * Assumption: single reader/writer. This function is not re-entrant */ -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) +bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry) { int count; - count = kfifo_in(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info.ih_ring_entry_size); - if (count != kfd->device_info.ih_ring_entry_size) { - dev_dbg_ratelimited(kfd->adev->dev, + count = kfifo_in(&node->ih_fifo, ih_ring_entry, + node->kfd->device_info.ih_ring_entry_size); + if (count != node->kfd->device_info.ih_ring_entry_size) { + dev_dbg_ratelimited(node->adev->dev, "Interrupt ring overflow, dropping interrupt %d\n", count); return false; @@ -129,32 +129,32 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) /* * Assumption: single reader/writer. This function is not re-entrant */ -static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry) +static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry) { int count; - count = kfifo_out(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info.ih_ring_entry_size); + count = kfifo_out(&node->ih_fifo, ih_ring_entry, + node->kfd->device_info.ih_ring_entry_size); - WARN_ON(count && count != kfd->device_info.ih_ring_entry_size); + WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size); - return count == kfd->device_info.ih_ring_entry_size; + return count == node->kfd->device_info.ih_ring_entry_size; } static void interrupt_wq(struct work_struct *work) { - struct kfd_dev *dev = container_of(work, struct kfd_dev, + struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work); uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE]; unsigned long start_jiffies = jiffies; - if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { + if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { dev_err_once(dev->adev->dev, "Ring entry too small\n"); return; } while (dequeue_ih_ring_entry(dev, ih_ring_entry)) { - dev->device_info.event_interrupt_class->interrupt_wq(dev, + dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev, ih_ring_entry); if (time_is_before_jiffies(start_jiffies + HZ)) { /* If we spent more than a second processing signals, @@ -166,14 +166,14 @@ static void interrupt_wq(struct work_struct *work) } } -bool interrupt_is_wanted(struct kfd_dev *dev, +bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag) { /* integer and bitwise OR so there is no boolean short-circuiting */ unsigned int wanted = 0; - wanted |= 
dev->device_info.event_interrupt_class->interrupt_isr(dev, + wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev, ih_ring_entry, patched_ihre, flag); return wanted != 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index ec1bf611624e..6eee9a0944f3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -109,11 +109,11 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) */ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct kfd_process *p = pdd->process; int err; - if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND) + if (!dev->kfd->use_iommu_v2 || pdd->bound == PDD_BOUND) return 0; if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) { @@ -146,7 +146,7 @@ void kfd_iommu_unbind_process(struct kfd_process *p) /* Callback for process shutdown invoked by the IOMMU driver */ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) { - struct kfd_dev *dev = kfd_device_by_pci_dev(pdev); + struct kfd_node *dev = kfd_device_by_pci_dev(pdev); struct kfd_process *p; struct kfd_process_device *pdd; @@ -182,7 +182,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid, unsigned long address, u16 flags) { - struct kfd_dev *dev; + struct kfd_node *dev; dev_warn_ratelimited(kfd_device, "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X", @@ -205,7 +205,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid, * Bind processes do the device that have been temporarily unbound * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device. */ -static int kfd_bind_processes_to_device(struct kfd_dev *kfd) +static int kfd_bind_processes_to_device(struct kfd_node *knode) { struct kfd_process_device *pdd; struct kfd_process *p; @@ -216,14 +216,14 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(kfd, p); + pdd = kfd_get_process_device_data(knode, p); if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) { mutex_unlock(&p->mutex); continue; } - err = amd_iommu_bind_pasid(kfd->adev->pdev, p->pasid, + err = amd_iommu_bind_pasid(knode->adev->pdev, p->pasid, p->lead_thread); if (err < 0) { pr_err("Unexpected pasid 0x%x binding failure\n", @@ -246,7 +246,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) * processes will be restored to PDD_BOUND state in * kfd_bind_processes_to_device. 
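* Illustrative suspend/resume flow (editor's note, not part of the patch): kfd_iommu_suspend() calls kfd_unbind_processes_from_device(), leaving each pdd->bound as PDD_BOUND_SUSPENDED; kfd_iommu_resume() then calls kfd_bind_processes_to_device(), which rebinds every suspended pasid via amd_iommu_bind_pasid() and restores PDD_BOUND.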
*/ -static void kfd_unbind_processes_from_device(struct kfd_dev *kfd) +static void kfd_unbind_processes_from_device(struct kfd_node *knode) { struct kfd_process_device *pdd; struct kfd_process *p; @@ -256,7 +256,7 @@ static void kfd_unbind_processes_from_device(struct kfd_dev *kfd) hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(kfd, p); + pdd = kfd_get_process_device_data(knode, p); if (WARN_ON(!pdd)) { mutex_unlock(&p->mutex); @@ -281,7 +281,7 @@ void kfd_iommu_suspend(struct kfd_dev *kfd) if (!kfd->use_iommu_v2) return; - kfd_unbind_processes_from_device(kfd); + kfd_unbind_processes_from_device(kfd->node); amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); @@ -312,7 +312,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd) amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, iommu_invalid_ppr_cb); - err = kfd_bind_processes_to_device(kfd); + err = kfd_bind_processes_to_device(kfd->node); if (err) { amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index bcf7bc3302c9..1bea629c49ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -38,7 +38,7 @@ /* Initialize a kernel queue, including allocations of GART memory * needed for the queue. */ -static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, +static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev, enum kfd_queue_type type, unsigned int queue_size) { struct queue_properties prop; @@ -75,7 +75,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, if (!kq->mqd_mgr) return false; - prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); + prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off); if (!prop.doorbell_ptr) { pr_err("Failed to initialize doorbell"); @@ -112,7 +112,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->rptr_kernel = kq->rptr_mem->cpu_ptr; kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; - retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size, + retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size, &kq->wptr_mem); if (retval != 0) @@ -189,7 +189,7 @@ err_rptr_allocate_vidmem: err_eop_allocate_vidmem: kfd_gtt_sa_free(dev, kq->pq); err_pq_allocate_vidmem: - kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); + kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr); err_get_kernel_doorbell: return false; @@ -220,7 +220,7 @@ static void kq_uninitialize(struct kernel_queue *kq, bool hanging) kfd_gtt_sa_free(kq->dev, kq->eop_mem); kfd_gtt_sa_free(kq->dev, kq->pq); - kfd_release_kernel_doorbell(kq->dev, + kfd_release_kernel_doorbell(kq->dev->kfd, kq->queue->properties.doorbell_ptr); uninit_queue(kq->queue); } @@ -298,7 +298,7 @@ void kq_submit_packet(struct kernel_queue *kq) } pr_debug("\n"); #endif - if (kq->dev->device_info.doorbell_size == 8) { + if (kq->dev->kfd->device_info.doorbell_size == 8) { *kq->wptr64_kernel = kq->pending_wptr64; write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, kq->pending_wptr64); @@ -311,7 +311,7 @@ void kq_submit_packet(struct kernel_queue *kq) void kq_rollback_packet(struct kernel_queue *kq) { - if (kq->dev->device_info.doorbell_size == 8) { + if (kq->dev->kfd->device_info.doorbell_size == 8) { 
kq->pending_wptr64 = *kq->wptr64_kernel; kq->pending_wptr = *kq->wptr_kernel % (kq->queue->properties.queue_size / 4); @@ -320,7 +320,7 @@ void kq_rollback_packet(struct kernel_queue *kq) } } -struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, +struct kernel_queue *kernel_queue_init(struct kfd_node *dev, enum kfd_queue_type type) { struct kernel_queue *kq; @@ -345,7 +345,7 @@ void kernel_queue_uninit(struct kernel_queue *kq, bool hanging) } /* FIXME: Can this test be removed? */ -static __attribute__((unused)) void test_kq(struct kfd_dev *dev) +static __attribute__((unused)) void test_kq(struct kfd_node *dev) { struct kernel_queue *kq; uint32_t *buffer, i; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 383202fd1ea2..9a6244430845 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -53,7 +53,7 @@ void kq_rollback_packet(struct kernel_queue *kq); struct kernel_queue { /* data */ - struct kfd_dev *dev; + struct kfd_node *dev; struct mqd_manager *mqd_mgr; struct queue *queue; uint64_t pending_wptr64; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 54933903bcb8..1e187677c90a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -423,7 +423,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->id, prange->prefetch_loc, + 0, adev->kfd.dev->node->id, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -456,7 +456,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->id, trigger); + 0, adev->kfd.dev->node->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -701,7 +701,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->id, 0, prange->prefetch_loc, + adev->kfd.dev->node->id, 0, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -737,7 +737,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->id, 0, trigger); + adev->kfd.dev->node->id, 0, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 623ccd227b7d..61f6dd68c84b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -46,7 +46,7 @@ int pipe_priority_map[] = { KFD_PIPE_PRIORITY_CS_HIGH }; -struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q) +struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj = NULL; @@ -61,7 +61,7 @@ struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_propertie return mqd_mem_obj; } -struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, +struct kfd_mem_obj 
*allocate_sdma_mqd(struct kfd_node *dev, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj = NULL; @@ -72,7 +72,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, return NULL; offset = (q->sdma_engine_id * - dev->device_info.num_sdma_queues_per_engine + + dev->kfd->device_info.num_sdma_queues_per_engine + q->sdma_queue_id) * dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 57f900ccaa10..46fc3f273d0d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -68,7 +68,7 @@ */ extern int pipe_priority_map[]; struct mqd_manager { - struct kfd_mem_obj* (*allocate_mqd)(struct kfd_dev *kfd, + struct kfd_mem_obj* (*allocate_mqd)(struct kfd_node *kfd, struct queue_properties *q); void (*init_mqd)(struct mqd_manager *mm, void **mqd, @@ -121,14 +121,14 @@ struct mqd_manager { uint32_t (*read_doorbell_id)(void *mqd); struct mutex mqd_mutex; - struct kfd_dev *dev; + struct kfd_node *dev; uint32_t mqd_size; }; -struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, +struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q); -struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, +struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, struct queue_properties *q); void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 4889865c725c..03e04d5e5a11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -74,7 +74,7 @@ static void set_priority(struct cik_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -390,7 +390,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -470,7 +470,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, } struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index d3e2b6a599a4..7a93be0ebb19 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -74,7 +74,7 @@ static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -122,7 +122,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; } - if (mm->dev->cwsr_enabled) { + if (mm->dev->kfd->cwsr_enabled) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -210,7 +210,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_pq_doorbell_control |= 1 << 
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled) + if (mm->dev->kfd->cwsr_enabled) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -405,7 +405,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 5aa75f72caa1..dff171b54b5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -81,7 +81,7 @@ static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -91,12 +91,12 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, * MES write to areas beyond MQD size. So allocate * 1 PAGE_SIZE memory for MQD is MES is enabled. */ - if (kfd->shared_resources.enable_mes) + if (node->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_compute_mqd); - if (kfd_gtt_sa_allocate(kfd, size, &mqd_mem_obj)) + if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj)) return NULL; return mqd_mem_obj; @@ -113,7 +113,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr; addr = mqd_mem_obj->gpu_addr; - if (mm->dev->shared_resources.enable_mes) + if (mm->dev->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_compute_mqd); @@ -155,7 +155,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; } - if (mm->dev->cwsr_enabled) { + if (mm->dev->kfd->cwsr_enabled) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -243,7 +243,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_pq_doorbell_control |= 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled) + if (mm->dev->kfd->cwsr_enabled) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -319,7 +319,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr; - if (mm->dev->shared_resources.enable_mes) + if (mm->dev->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_sdma_mqd); @@ -387,7 +387,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -463,7 +463,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, * To allocate SDMA MQDs by generic functions * when MES is enabled. 
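* (Editor's sketch of the effect, not part of the patch: the override below points mqd->allocate_mqd at the generic, page-sized allocate_mqd() and mqd->free_mqd at kfd_free_mqd_cp(), so SDMA MQDs reuse the CP allocation path whenever MES is enabled.)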
*/ - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { mqd->allocate_mqd = allocate_mqd; mqd->free_mqd = kfd_free_mqd_cp; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 51b53110341b..943a738e73f9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -83,7 +83,7 @@ static void set_priority(struct v9_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, struct queue_properties *q) { int retval; @@ -105,11 +105,11 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, * pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct * amdgpu memory functions to do so. */ - if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { + if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev, + retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev, ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), &(mqd_mem_obj->gtt_mem), @@ -121,7 +121,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, return NULL; } } else { - retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd), + retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd), &mqd_mem_obj); if (retval) return NULL; @@ -136,7 +136,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, { uint64_t addr; struct v9_mqd *m; - struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev; m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr; addr = mqd_mem_obj->gpu_addr; @@ -169,7 +168,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, if (q->format == KFD_QUEUE_FORMAT_AQL) { m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { /* On GC 9.4.3, DW 41 is re-purposed as * compute_tg_chunk_size. 
* TODO: review this setting when active CUs in the @@ -179,7 +178,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, } } else { /* PM4 queue */ - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { m->compute_static_thread_mgmt_se6 = 0; /* TODO: program pm4_target_xcc */ } @@ -190,7 +189,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT); } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -225,7 +224,6 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q, struct mqd_update_info *minfo) { - struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev; struct v9_mqd *m; m = get_mqd(mqd); @@ -275,13 +273,13 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT | 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT | 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT; - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) + if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) m->cp_hqd_pq_control |= - CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; m->cp_hqd_pq_doorbell_control |= 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -487,7 +485,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 530ba6f5b57e..f6b4a5686dcb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -77,7 +77,7 @@ static void set_priority(struct vi_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -136,7 +136,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT); } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -227,7 +227,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT; } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT | mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT; @@ -446,7 +446,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -528,7 +528,7 @@ struct 
mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, } struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index f612325241aa..2f54172e9175 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -45,7 +45,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm, unsigned int process_count, queue_count, compute_queue_count, gws_queue_count; unsigned int map_queue_size; unsigned int max_proc_per_quantum = 1; - struct kfd_dev *dev = pm->dqm->dev; + struct kfd_node *dev = pm->dqm->dev; process_count = pm->dqm->processes_count; queue_count = pm->dqm->active_queue_count; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 18250845a989..54d7d4665ad2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -119,7 +119,7 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, struct pm4_mes_runlist *packet; int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; + struct kfd_node *kfd = pm->dqm->dev; /* Determine the number of processes to map together to HW: * it can not exceed the number of VMIDs available to the @@ -220,7 +220,8 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, case KFD_QUEUE_TYPE_SDMA: case KFD_QUEUE_TYPE_SDMA_XGMI: use_static = false; /* no static queues under SDMA */ - if (q->properties.sdma_engine_id < 2 && !pm_use_ext_eng(q->device)) + if (q->properties.sdma_engine_id < 2 && + !pm_use_ext_eng(q->device->kfd)) packet->bitfields2.engine_sel = q->properties.sdma_engine_id + engine_sel__mes_map_queues__sdma0_vi; else { @@ -263,7 +264,8 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, sizeof(struct pm4_mes_unmap_queues)); - packet->bitfields2.extended_engine_sel = pm_use_ext_eng(pm->dqm->dev) ? + packet->bitfields2.extended_engine_sel = + pm_use_ext_eng(pm->dqm->dev->kfd) ? 
extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel : extended_engine_sel__mes_unmap_queues__legacy_engine_sel; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index 4f951eaa6ee8..faf4772ed317 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -77,7 +77,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, { struct pm4_mes_runlist *packet; int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; + struct kfd_node *kfd = pm->dqm->dev; if (WARN_ON(!ib)) return -EFAULT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 94a438956868..fdb97e5d0c01 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -210,11 +210,13 @@ enum cache_policy { ((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))) +struct kfd_node; + struct kfd_event_interrupt_class { - bool (*interrupt_isr)(struct kfd_dev *dev, + bool (*interrupt_isr)(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag); - void (*interrupt_wq)(struct kfd_dev *dev, + void (*interrupt_wq)(struct kfd_node *dev, const uint32_t *ih_ring_entry); }; @@ -236,8 +238,8 @@ struct kfd_device_info { uint64_t reserved_sdma_queues_bitmap; }; -unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev); -unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev); +unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev); +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev); struct kfd_mem_obj { uint32_t range_start; @@ -253,13 +255,59 @@ struct kfd_vmid_info { uint32_t vmid_num_kfd; }; +struct kfd_dev; + +struct kfd_node { + struct amdgpu_device *adev; /* Duplicated here along with keeping + * a copy in kfd_dev to save a hop + */ + const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with + * keeping a copy in kfd_dev to + * save a hop + */ + struct kfd_vmid_info vm_info; + unsigned int id; /* topology stub index */ + /* Interrupts */ + struct kfifo ih_fifo; + struct workqueue_struct *ih_wq; + struct work_struct interrupt_work; + spinlock_t interrupt_lock; + + /* + * Interrupts of interest to KFD are copied + * from the HW ring into a SW ring. + */ + bool interrupts_active; + + /* QCM Device instance */ + struct device_queue_manager *dqm; + + /* Global GWS resource shared between processes */ + void *gws; + bool gws_debug_workaround; + + /* Clients watching SMI events */ + struct list_head smi_clients; + spinlock_t smi_lock; + uint32_t reset_seq_num; + + /* SRAM ECC flag */ + atomic_t sram_ecc_flag; + + /*spm process id */ + unsigned int spm_pasid; + + /* Maximum process number mapped to HW scheduler */ + unsigned int max_proc_per_quantum; + + struct kfd_dev *kfd; +}; + struct kfd_dev { struct amdgpu_device *adev; struct kfd_device_info device_info; - unsigned int id; /* topology stub index */ - phys_addr_t doorbell_base; /* Start of actual doorbells used by * KFD. 
It is aligned for mapping * into user mode @@ -274,7 +322,6 @@ struct kfd_dev { */ struct kgd2kfd_shared_resources shared_resources; - struct kfd_vmid_info vm_info; struct kfd_local_mem_info local_mem_info; const struct kfd2kgd_calls *kfd2kgd; @@ -290,30 +337,13 @@ struct kfd_dev { unsigned int gtt_sa_chunk_size; unsigned int gtt_sa_num_of_chunks; - /* Interrupts */ - struct kfifo ih_fifo; - struct workqueue_struct *ih_wq; - struct work_struct interrupt_work; - spinlock_t interrupt_lock; - - /* QCM Device instance */ - struct device_queue_manager *dqm; - bool init_complete; - /* - * Interrupts of interest to KFD are copied - * from the HW ring into a SW ring. - */ - bool interrupts_active; /* Firmware versions */ uint16_t mec_fw_version; uint16_t mec2_fw_version; uint16_t sdma_fw_version; - /* Maximum process number mapped to HW scheduler */ - unsigned int max_proc_per_quantum; - /* CWSR */ bool cwsr_enabled; const void *cwsr_isa; @@ -327,21 +357,9 @@ struct kfd_dev { /* Use IOMMU v2 flag */ bool use_iommu_v2; - /* SRAM ECC flag */ - atomic_t sram_ecc_flag; - /* Compute Profile ref. count */ atomic_t compute_profile; - /* Global GWS resource shared between processes */ - void *gws; - - /* Clients watching SMI events */ - struct list_head smi_clients; - spinlock_t smi_lock; - - uint32_t reset_seq_num; - struct ida doorbell_ida; unsigned int max_doorbell_slices; @@ -349,6 +367,8 @@ struct kfd_dev { /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ struct dev_pagemap pgmap; + + struct kfd_node *node; }; enum kfd_mempool { @@ -563,7 +583,7 @@ struct queue { unsigned int doorbell_id; struct kfd_process *process; - struct kfd_dev *device; + struct kfd_node *device; void *gws; /* procfs */ @@ -697,7 +717,7 @@ enum kfd_pdd_bound { /* Data that is per-process-per device. */ struct kfd_process_device { /* The device that owns this data. */ - struct kfd_dev *dev; + struct kfd_node *dev; /* The process that owns this kfd_process_device. 
*/ struct kfd_process *process; @@ -925,7 +945,7 @@ struct amdkfd_ioctl_desc { unsigned int cmd_drv; const char *name; }; -bool kfd_dev_is_large_bar(struct kfd_dev *dev); +bool kfd_dev_is_large_bar(struct kfd_node *dev); int kfd_process_create_wq(void); void kfd_process_destroy_wq(void); @@ -961,16 +981,16 @@ int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id); int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct file *drm_file); -struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, +struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, struct kfd_process *p); -struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev, struct kfd_process *p); -struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, struct kfd_process *p); bool kfd_process_xnack_mode(struct kfd_process *p, bool supported); -int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma); /* KFD process API for creating and translating handles */ @@ -994,7 +1014,7 @@ void kfd_pasid_free(u32 pasid); size_t kfd_doorbell_process_slice(struct kfd_dev *kfd); int kfd_doorbell_init(struct kfd_dev *kfd); void kfd_doorbell_fini(struct kfd_dev *kfd); -int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma); void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off); @@ -1012,10 +1032,10 @@ void kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index); /* GTT Sub-Allocator */ -int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, +int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, struct kfd_mem_obj **mem_obj); -int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj); +int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj); extern struct device *kfd_device; @@ -1028,25 +1048,25 @@ void kfd_procfs_del_queue(struct queue *q); /* Topology */ int kfd_topology_init(void); void kfd_topology_shutdown(void); -int kfd_topology_add_device(struct kfd_dev *gpu); -int kfd_topology_remove_device(struct kfd_dev *gpu); +int kfd_topology_add_device(struct kfd_node *gpu); +int kfd_topology_remove_device(struct kfd_node *gpu); struct kfd_topology_device *kfd_topology_device_by_proximity_domain( uint32_t proximity_domain); struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock( uint32_t proximity_domain); struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); -struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); -struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); -struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev); -int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); +struct kfd_node *kfd_device_by_id(uint32_t gpu_id); +struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); +struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); +int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); /* Interrupts */ 
-int kfd_interrupt_init(struct kfd_dev *dev); -void kfd_interrupt_exit(struct kfd_dev *dev); -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry); -bool interrupt_is_wanted(struct kfd_dev *dev, +int kfd_interrupt_init(struct kfd_node *dev); +void kfd_interrupt_exit(struct kfd_node *dev); +bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry); +bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag); @@ -1174,22 +1194,22 @@ void print_queue_properties(struct queue_properties *q); void print_queue(struct queue *q); struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); -struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev); + struct kfd_node *dev); +struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev); void device_queue_manager_uninit(struct device_queue_manager *dqm); -struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, +struct kernel_queue *kernel_queue_init(struct kfd_node *dev, enum kfd_queue_type type); void kernel_queue_uninit(struct kernel_queue *kq, bool hanging); int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid); @@ -1206,7 +1226,7 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p); int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p); void pqm_uninit(struct process_queue_manager *pqm); int pqm_create_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, + struct kfd_node *dev, struct file *f, struct queue_properties *properties, unsigned int *qid, @@ -1323,7 +1343,7 @@ int kfd_wait_on_events(struct kfd_process *p, uint32_t *wait_result); void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, uint32_t valid_id_bits); -void kfd_signal_iommu_event(struct kfd_dev *dev, +void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid, unsigned long address, bool is_write_requested, bool is_execute_requested); void kfd_signal_hw_exception_event(u32 pasid); @@ -1339,12 +1359,12 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, int kfd_get_num_events(struct kfd_process *p); int kfd_event_destroy(struct kfd_process *p, uint32_t event_id); -void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, +void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, struct kfd_vm_fault_info *info); -void kfd_signal_reset_event(struct kfd_dev *dev); +void kfd_signal_reset_event(struct kfd_node *dev); -void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid); +void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid); void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type); @@ -1359,12 +1379,12 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) bool kfd_is_locked(void); /* 
Compute profile */ -void kfd_inc_compute_active(struct kfd_dev *dev); -void kfd_dec_compute_active(struct kfd_dev *dev); +void kfd_inc_compute_active(struct kfd_node *dev); +void kfd_dec_compute_active(struct kfd_node *dev); /* Cgroup Support */ /* Check with device cgroup if @kfd device is accessible */ -static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) +static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd) { #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) struct drm_device *ddev = adev_to_drm(kfd->adev); @@ -1389,7 +1409,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data); int kfd_debugfs_rls_by_device(struct seq_file *m, void *data); int pm_debugfs_runlist(struct seq_file *m, void *data); -int kfd_debugfs_hang_hws(struct kfd_dev *dev); +int kfd_debugfs_hang_hws(struct kfd_node *dev); int pm_debugfs_hang_hws(struct packet_manager *pm); int dqm_debugfs_hang_hws(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 07a9eaf9b7d8..66e021889c64 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -269,7 +269,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) int cu_cnt; int wave_cnt; int max_waves_per_cu; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct kfd_process *proc = NULL; struct kfd_process_device *pdd = NULL; @@ -691,7 +691,7 @@ void kfd_process_destroy_wq(void) static void kfd_process_free_gpuvm(struct kgd_mem *mem, struct kfd_process_device *pdd, void **kptr) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; if (kptr && *kptr) { amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem); @@ -713,7 +713,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, uint64_t gpu_va, uint32_t size, uint32_t flags, struct kgd_mem **mem, void **kptr) { - struct kfd_dev *kdev = pdd->dev; + struct kfd_node *kdev = pdd->dev; int err; err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size, @@ -982,7 +982,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) static void kfd_process_kunmap_signal_bo(struct kfd_process *p) { struct kfd_process_device *pdd; - struct kfd_dev *kdev; + struct kfd_node *kdev; void *mem; kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle)); @@ -1040,9 +1040,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) bitmap_free(pdd->qpd.doorbell_bitmap); idr_destroy(&pdd->alloc_idr); - kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index); + kfd_free_process_doorbells(pdd->dev->kfd, pdd->doorbell_index); - if (pdd->dev->shared_resources.enable_mes) + if (pdd->dev->kfd->shared_resources.enable_mes) amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, pdd->proc_ctx_bo); /* @@ -1259,10 +1259,10 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) int i; for (i = 0; i < p->n_pdds; i++) { - struct kfd_dev *dev = p->pdds[i]->dev; + struct kfd_node *dev = p->pdds[i]->dev; struct qcm_process_device *qpd = &p->pdds[i]->qpd; - if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) continue; offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id); @@ -1279,7 +1279,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) return err; } - memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); + memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, 
dev->kfd->cwsr_isa_size); qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", @@ -1291,7 +1291,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE @@ -1300,7 +1300,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) void *kaddr; int ret; - if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) return 0; /* cwsr_base is only set for dGPU */ @@ -1313,7 +1313,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) qpd->cwsr_kaddr = kaddr; qpd->tba_addr = qpd->cwsr_base; - memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); + memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size); qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", @@ -1324,10 +1324,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; - if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) return; kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr); @@ -1371,7 +1371,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * support retry. */ for (i = 0; i < p->n_pdds; i++) { - struct kfd_dev *dev = p->pdds[i]->dev; + struct kfd_node *dev = p->pdds[i]->dev; /* Only consider GFXv9 and higher GPUs. 
Older GPUs don't * support the SVM APIs and don't need to be considered @@ -1394,7 +1394,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) return false; - if (dev->noretry) + if (dev->kfd->noretry) return false; } @@ -1528,7 +1528,7 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, return 0; } -struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev, struct kfd_process *p) { int i; @@ -1540,7 +1540,7 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, return NULL; } -struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, struct kfd_process *p) { struct kfd_process_device *pdd = NULL; @@ -1552,7 +1552,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, if (!pdd) return NULL; - if (init_doorbell_bitmap(&pdd->qpd, dev)) { + if (init_doorbell_bitmap(&pdd->qpd, dev->kfd)) { pr_err("Failed to init doorbell for process\n"); goto err_free_pdd; } @@ -1573,7 +1573,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, AMDGPU_MES_PROC_CTX_SIZE, &pdd->proc_ctx_bo, @@ -1619,7 +1619,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; struct kfd_process *p; - struct kfd_dev *dev; + struct kfd_node *dev; int ret; if (!drm_file) @@ -1679,7 +1679,7 @@ err_reserve_ib_mem: * * Assumes that the process lock is held. 
*/ -struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, +struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, struct kfd_process *p) { struct kfd_process_device *pdd; @@ -1811,7 +1811,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid, + kfd_smi_event_queue_eviction(pdd->dev->kfd, p->lead_thread->pid, trigger); r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, @@ -1839,7 +1839,7 @@ fail: if (n_evicted == 0) break; - kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd)) @@ -1860,7 +1860,7 @@ int kfd_process_restore_queues(struct kfd_process *p) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd); @@ -2016,7 +2016,7 @@ int kfd_resume_all_processes(void) return ret; } -int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { struct kfd_process_device *pdd; @@ -2051,7 +2051,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) { struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; /* * It can be that we race and lose here, but that is extremely unlikely diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 4236539d9f93..5602498e713f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -81,7 +81,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; if (pdd->already_dequeued) return; @@ -93,7 +93,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws) { - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct process_queue_node *pqn; struct kfd_process_device *pdd; struct kgd_mem *mem = NULL; @@ -178,7 +178,7 @@ void pqm_uninit(struct process_queue_manager *pqm) } static int init_user_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, struct queue **q, + struct kfd_node *dev, struct queue **q, struct queue_properties *q_properties, struct file *f, struct amdgpu_bo *wptr_bo, unsigned int qid) @@ -199,7 +199,7 @@ static int init_user_queue(struct process_queue_manager *pqm, (*q)->device = dev; (*q)->process = pqm->process; - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, AMDGPU_MES_GANG_CTX_SIZE, &(*q)->gang_ctx_bo, @@ -224,7 +224,7 @@ cleanup: } int pqm_create_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, + struct kfd_node *dev, struct file *f, struct queue_properties *properties, unsigned int *qid, @@ -258,7 +258,7 
@@ int pqm_create_queue(struct process_queue_manager *pqm, * Hence we also check the type as well */ if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ)) - max_queues = dev->device_info.max_no_of_hqd/2; + max_queues = dev->kfd->device_info.max_no_of_hqd/2; if (pdd->qpd.queue_count >= max_queues) return -ENOSPC; @@ -354,7 +354,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, */ *p_doorbell_offset_in_process = (q->properties.doorbell_off * sizeof(uint32_t)) & - (kfd_doorbell_process_slice(dev) - 1); + (kfd_doorbell_process_slice(dev->kfd) - 1); pr_debug("PQM After DQM create queue\n"); @@ -387,7 +387,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) struct process_queue_node *pqn; struct kfd_process_device *pdd; struct device_queue_manager *dqm; - struct kfd_dev *dev; + struct kfd_node *dev; int retval; dqm = NULL; @@ -439,7 +439,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) pdd->qpd.num_gws = 0; } - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo); if (pqn->q->wptr_bo) @@ -859,7 +859,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) { ret = -ENOMEM; goto exit; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 0472b56de245..a0bf6558f4ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -36,7 +36,7 @@ struct kfd_smi_client { wait_queue_head_t wait_queue; /* events enabled */ uint64_t events; - struct kfd_dev *dev; + struct kfd_node *dev; spinlock_t lock; struct rcu_head rcu; pid_t pid; @@ -149,7 +149,7 @@ static void kfd_smi_ev_client_free(struct rcu_head *p) static int kfd_smi_ev_release(struct inode *inode, struct file *filep) { struct kfd_smi_client *client = filep->private_data; - struct kfd_dev *dev = client->dev; + struct kfd_node *dev = client->dev; spin_lock(&dev->smi_lock); list_del_rcu(&client->list); @@ -171,7 +171,7 @@ static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client, return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event); } -static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev, +static void add_event_to_kfifo(pid_t pid, struct kfd_node *dev, unsigned int smi_event, char *event_msg, int len) { struct kfd_smi_client *client; @@ -196,7 +196,7 @@ static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev, } __printf(4, 5) -static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev, +static void kfd_smi_event_add(pid_t pid, struct kfd_node *dev, unsigned int event, char *fmt, ...) 
{ char fifo_in[KFD_SMI_EVENT_MSG_SIZE]; @@ -215,7 +215,7 @@ static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev, add_event_to_kfifo(pid, dev, event, fifo_in, len); } -void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) +void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset) { unsigned int event; @@ -228,7 +228,7 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) kfd_smi_event_add(0, dev, event, "%x\n", dev->reset_seq_num); } -void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, +void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask) { kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n", @@ -236,7 +236,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, amdgpu_dpm_get_thermal_throttling_counter(dev->adev)); } -void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) +void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) { struct amdgpu_task_info task_info; @@ -254,17 +254,17 @@ void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_START, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_START, "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, dev->id, write_fault ? 'W' : 'R'); + address, dev->node->id, write_fault ? 'W' : 'R'); } void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, unsigned long address, bool migration) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_END, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_END, "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, dev->id, migration ? 'M' : 'U'); + pid, address, dev->node->id, migration ? 
'M' : 'U'); } void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, @@ -273,7 +273,7 @@ void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_START, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_START, "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, prefetch_loc, preferred_loc, trigger); @@ -283,7 +283,7 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_END, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_END, "%lld -%d @%lx(%lx) %x->%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, trigger); @@ -292,16 +292,16 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_EVICTION, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_EVICTION, "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - dev->id, trigger); + dev->node->id, trigger); } void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_RESTORE, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_RESTORE, "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - dev->id); + dev->node->id); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -328,12 +328,12 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_UNMAP_FROM_GPU, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_UNMAP_FROM_GPU, "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, dev->id, trigger); + pid, address, last - address + 1, dev->node->id, trigger); } -int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) +int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) { struct kfd_smi_client *client; int ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 76fe4e0ec2d2..59cd089f80d1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -24,11 +24,11 @@ #ifndef KFD_SMI_EVENTS_H_INCLUDED #define KFD_SMI_EVENTS_H_INCLUDED -int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd); -void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid); -void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, +int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd); +void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid); +void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask); -void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset); +void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset); void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c02430537e9c..96ccff79902c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1266,7 +1266,7 @@ svm_range_unmap_from_gpus(struct 
svm_range *prange, unsigned long start, return -EINVAL; } - kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid, + kfd_smi_event_unmap_from_gpu(pdd->dev->kfd, p->lead_thread->pid, start, last, trigger); r = svm_range_unmap_from_gpu(pdd->dev->adev, @@ -3083,7 +3083,7 @@ int svm_range_list_init(struct kfd_process *p) spin_lock_init(&svms->deferred_list_lock); for (i = 0; i < p->n_pdds; i++) - if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev)) + if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->kfd)) bitmap_set(svms->bitmap_supported, i, 1); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 8e4124dcb6e4..06a11186d947 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -96,7 +96,7 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id) return ret; } -struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) +struct kfd_node *kfd_device_by_id(uint32_t gpu_id) { struct kfd_topology_device *top_dev; @@ -107,10 +107,10 @@ struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) return top_dev->gpu; } -struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) +struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev) { struct kfd_topology_device *top_dev; - struct kfd_dev *device = NULL; + struct kfd_node *device = NULL; down_read(&topology_lock); @@ -125,10 +125,10 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) return device; } -struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev) +struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev) { struct kfd_topology_device *top_dev; - struct kfd_dev *device = NULL; + struct kfd_node *device = NULL; down_read(&topology_lock); @@ -526,7 +526,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, if (dev->gpu) { log_max_watch_addr = - __ilog2_u32(dev->gpu->device_info.num_of_watch_points); + __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points); if (log_max_watch_addr) { dev->node_props.capability |= @@ -548,11 +548,11 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_64bit_prop(buffer, offs, "local_mem_size", 0ULL); sysfs_show_32bit_prop(buffer, offs, "fw_version", - dev->gpu->mec_fw_version); + dev->gpu->kfd->mec_fw_version); sysfs_show_32bit_prop(buffer, offs, "capability", dev->node_props.capability); sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", - dev->gpu->sdma_fw_version); + dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); @@ -1157,7 +1157,7 @@ void kfd_topology_shutdown(void) up_write(&topology_lock); } -static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) +static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) { uint32_t hashout; uint32_t buf[7]; @@ -1167,8 +1167,8 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) if (!gpu) return 0; - local_mem_size = gpu->local_mem_info.local_mem_size_private + - gpu->local_mem_info.local_mem_size_public; + local_mem_size = gpu->kfd->local_mem_info.local_mem_size_private + + gpu->kfd->local_mem_info.local_mem_size_public; buf[0] = gpu->adev->pdev->devfn; buf[1] = gpu->adev->pdev->subsystem_vendor | (gpu->adev->pdev->subsystem_device << 16); @@ -1188,7 +1188,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) * list then return NULL. This means a new topology device has to * be created for this GPU. 
*/ -static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) +static struct kfd_topology_device *kfd_assign_gpu(struct kfd_node *gpu) { struct kfd_topology_device *dev; struct kfd_topology_device *out_dev = NULL; @@ -1201,7 +1201,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) /* Discrete GPUs need their own topology device list * entries. Don't assign them to CPU/APU nodes. */ - if (!gpu->use_iommu_v2 && + if (!gpu->kfd->use_iommu_v2 && dev->node_props.cpu_cores_count) continue; @@ -1275,7 +1275,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev, CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; /* set gpu (dev) flags. */ } else { - if (!dev->gpu->pci_atomic_requested || + if (!dev->gpu->kfd->pci_atomic_requested || dev->gpu->adev->asic_type == CHIP_HAWAII) link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; @@ -1569,8 +1569,8 @@ static int kfd_dev_create_p2p_links(void) if (dev == new_dev) break; if (!dev->gpu || !dev->gpu->adev || - (dev->gpu->hive_id && - dev->gpu->hive_id == new_dev->gpu->hive_id)) + (dev->gpu->kfd->hive_id && + dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id)) goto next; /* check if node(s) is/are peer accessible in one direction or bi-direction */ @@ -1590,7 +1590,6 @@ out: return ret; } - /* Helper function. See kfd_fill_gpu_cache_info for parameter description */ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext, struct kfd_gpu_cache_info *pcache_info, @@ -1723,7 +1722,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, /* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info * tables */ -static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev) +static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev) { struct kfd_gpu_cache_info *pcache_info = NULL; int i, j, k; @@ -1805,7 +1804,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct pr_debug("Added [%d] GPU cache entries\n", num_of_entries); } -static int kfd_topology_add_device_locked(struct kfd_dev *gpu, uint32_t gpu_id, +static int kfd_topology_add_device_locked(struct kfd_node *gpu, uint32_t gpu_id, struct kfd_topology_device **dev) { int proximity_domain = ++topology_crat_proximity_domain; @@ -1865,7 +1864,7 @@ err: return res; } -int kfd_topology_add_device(struct kfd_dev *gpu) +int kfd_topology_add_device(struct kfd_node *gpu) { uint32_t gpu_id; struct kfd_topology_device *dev; @@ -1916,7 +1915,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.simd_arrays_per_engine = cu_info.num_shader_arrays_per_engine; - dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version; + dev->node_props.gfx_target_version = + gpu->kfd->device_info.gfx_target_version; dev->node_props.vendor_id = gpu->adev->pdev->vendor; dev->node_props.device_id = gpu->adev->pdev->device; dev->node_props.capability |= @@ -1929,15 +1929,15 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.max_engine_clk_ccompute = cpufreq_quick_get_max(0) / 1000; dev->node_props.drm_render_minor = - gpu->shared_resources.drm_render_minor; + gpu->kfd->shared_resources.drm_render_minor; - dev->node_props.hive_id = gpu->hive_id; + dev->node_props.hive_id = gpu->kfd->hive_id; dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); dev->node_props.num_sdma_xgmi_engines = kfd_get_num_xgmi_sdma_engines(gpu); 
 	dev->node_props.num_sdma_queues_per_engine =
-		gpu->device_info.num_sdma_queues_per_engine -
-		gpu->device_info.num_reserved_sdma_queues_per_engine;
+		gpu->kfd->device_info.num_sdma_queues_per_engine -
+		gpu->kfd->device_info.num_reserved_sdma_queues_per_engine;
 	dev->node_props.num_gws = (dev->gpu->gws &&
 		dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
 		dev->gpu->adev->gds.gws_size : 0;
@@ -1979,7 +1979,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	 * Overwrite ATS capability according to needs_iommu_device to fix
 	 * potential missing corresponding bit in CRAT of BIOS.
 	 */
-	if (dev->gpu->use_iommu_v2)
+	if (dev->gpu->kfd->use_iommu_v2)
 		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
 	else
 		dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
@@ -2079,7 +2079,7 @@ static void kfd_topology_update_io_links(int proximity_domain)
 	}
 }
 
-int kfd_topology_remove_device(struct kfd_dev *gpu)
+int kfd_topology_remove_device(struct kfd_node *gpu)
 {
 	struct kfd_topology_device *dev, *tmp;
 	uint32_t gpu_id;
@@ -2119,7 +2119,7 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
 * Return -	0: On success (@kdev will be NULL for non GPU nodes)
 *		-1: If end of list
 */
-int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev)
 {
 	struct kfd_topology_device *top_dev;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index fca30d00a9bb..3b8afb6aba79 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -75,7 +75,7 @@ struct kfd_mem_properties {
 	uint32_t flags;
 	uint32_t width;
 	uint32_t mem_clk_max;
-	struct kfd_dev *gpu;
+	struct kfd_node *gpu;
 	struct kobject *kobj;
 	struct attribute attr;
 };
@@ -93,7 +93,7 @@ struct kfd_cache_properties {
 	uint32_t cache_latency;
 	uint32_t cache_type;
 	uint8_t sibling_map[CACHE_SIBLINGMAP_SIZE];
-	struct kfd_dev *gpu;
+	struct kfd_node *gpu;
 	struct kobject *kobj;
 	struct attribute attr;
 	uint32_t sibling_map_size;
@@ -113,7 +113,7 @@ struct kfd_iolink_properties {
 	uint32_t max_bandwidth;
 	uint32_t rec_transfer_size;
 	uint32_t flags;
-	struct kfd_dev *gpu;
+	struct kfd_node *gpu;
 	struct kobject *kobj;
 	struct attribute attr;
 };
@@ -135,7 +135,7 @@ struct kfd_topology_device {
 	struct list_head io_link_props;
 	struct list_head p2p_link_props;
 	struct list_head perf_props;
-	struct kfd_dev *gpu;
+	struct kfd_node *gpu;
 	struct kobject *kobj_node;
 	struct kobject *kobj_mem;
 	struct kobject *kobj_cache;

From 74c5b85da75475c73a8f040397610fbfcc2c3e78 Mon Sep 17 00:00:00 2001
From: Mukul Joshi
Date: Mon, 9 May 2022 16:33:38 -0400
Subject: [PATCH 272/861] drm/amdkfd: Add spatial partitioning support in KFD

This patch introduces multi-partition support in KFD. It includes:
- Support for a maximum of 8 spatial partitions in KFD.
- Initialization of one HIQ per partition.
- Management of the VMID range depending on the partition mode.
- Management of the doorbell aperture space between all partitions.
- Each partition does its own queue management, interrupt handling,
  and SMI event reporting.
- IOMMU, if enabled with multiple partitions, will only work on the
  first partition.
- SPM is only supported on the first partition.
- Currently, there is no support for resetting individual partitions.
  All partitions will reset together.
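To make the VMID-range management above concrete, here is a minimal
user-space sketch of the CPX-mode split implemented in
kgd2kfd_device_init() in the diff below. It is an illustration only:
the helper name split_vmids_cpx and the standalone framing are
assumptions of this sketch, not part of the kernel sources.

#include <stdio.h>

/* Sketch of the CPX-mode VMID split (hypothetical helper, not kernel
 * code): of the 13 KFD VMIDs (3-15), VMID 3 is dropped so the remaining
 * 12 divide evenly; even-indexed nodes get the lower half and
 * odd-indexed nodes the upper half.
 */
static void split_vmids_cpx(unsigned int first, unsigned int last,
			    unsigned int node_idx,
			    unsigned int *node_first, unsigned int *node_last)
{
	unsigned int num = (last - first + 1) / 2;	/* VMIDs per node */

	first = last + 1 - num * 2;	/* shrink range to an even count */
	if (node_idx % 2 == 0) {
		*node_first = first;		/* e.g. 4 */
		*node_last = last - num;	/* e.g. 9 */
	} else {
		*node_first = first + num;	/* e.g. 10 */
		*node_last = last;		/* e.g. 15 */
	}
}

int main(void)
{
	unsigned int f, l;

	split_vmids_cpx(3, 15, 0, &f, &l);
	printf("node 0: VMIDs %u-%u\n", f, l);	/* prints 4-9 */
	split_vmids_cpx(3, 15, 1, &f, &l);
	printf("node 1: VMIDs %u-%u\n", f, l);	/* prints 10-15 */
	return 0;
}

Dropping the lowest VMID is what makes the two halves equal; the same
arithmetic appears in the diff below, where each node also derives its
compute_vmid_bitmap from its first/last VMID.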
Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 210 +++++++++++++----- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 13 +- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 16 +- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 24 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 8 +- 7 files changed, 206 insertions(+), 75 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 23d9a7f77055..37c6dc5c37bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -567,23 +567,27 @@ kfd_interrupt_error: return err; } -static void kfd_cleanup_node(struct kfd_dev *kfd) +static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes) { - struct kfd_node *knode = kfd->node; + struct kfd_node *knode; + unsigned int i; - device_queue_manager_uninit(knode->dqm); - kfd_interrupt_exit(knode); - kfd_topology_remove_device(knode); - if (knode->gws) - amdgpu_amdkfd_free_gws(knode->adev, knode->gws); - kfree(knode); - kfd->node = NULL; + for (i = 0; i < num_nodes; i++) { + knode = kfd->nodes[i]; + device_queue_manager_uninit(knode->dqm); + kfd_interrupt_exit(knode); + kfd_topology_remove_device(knode); + if (knode->gws) + amdgpu_amdkfd_free_gws(knode->adev, knode->gws); + kfree(knode); + kfd->nodes[i] = NULL; + } } bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources) { - unsigned int size, map_process_packet_size; + unsigned int size, map_process_packet_size, i; struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; @@ -596,9 +600,18 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; - last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; - vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; + if (kfd->adev->gfx.num_xcd == 0 || kfd->adev->gfx.num_xcd == 1 || + kfd->adev->gfx.num_xcc_per_xcp == 0) + kfd->num_nodes = 1; + else + kfd->num_nodes = + kfd->adev->gfx.num_xcd/kfd->adev->gfx.num_xcc_per_xcp; + if (kfd->num_nodes == 0) { + dev_err(kfd_device, + "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", + kfd->adev->gfx.num_xcd, kfd->adev->gfx.num_xcc_per_xcp); + goto out; + } /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. * 32 and 64-bit requests are possible and must be @@ -617,6 +630,26 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, return false; } + first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; + last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; + vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; + + /* For GFX9.4.3, we need special handling for VMIDs depending on + * partition mode. + * In CPX mode, the VMID range needs to be shared between XCDs. + * Additionally, there are 13 VMIDs (3-15) available for KFD. To + * divide them equally, we change starting VMID to 4 and not use + * VMID 3. + * If the VMID range changes for GFX9.4.3, then this code MUST be + * revisited. 
+ */ + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && + kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + kfd->num_nodes != 1) { + vmid_num_kfd /= 2; + first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; + } + /* Verify module parameters regarding mapped process number*/ if (hws_max_conc_proc >= 0) max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd); @@ -682,6 +715,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); + /* TODO: Needs to be updated for memory partitioning */ svm_migrate_init(kfd->adev); /* Allocate the KFD node */ @@ -700,12 +734,51 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->max_proc_per_quantum = max_proc_per_quantum; atomic_set(&node->sram_ecc_flag, 0); - /* Initialize the KFD node */ - if (kfd_init_node(node)) { - dev_err(kfd_device, "Error initializing KFD node\n"); - goto node_init_error; + dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", + kfd->num_nodes); + for (i = 0; i < kfd->num_nodes; i++) { + node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); + if (!node) + goto node_alloc_error; + + node->adev = kfd->adev; + node->kfd = kfd; + node->kfd2kgd = kfd->kfd2kgd; + node->vm_info.vmid_num_kfd = vmid_num_kfd; + node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp); + node->start_xcc_id = node->num_xcc_per_node * i; + + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && + kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + kfd->num_nodes != 1) { + /* For GFX9.4.3 and CPX mode, first XCD gets VMID range + * 4-9 and second XCD gets VMID range 10-15. + */ + + node->vm_info.first_vmid_kfd = (i%2 == 0) ? + first_vmid_kfd : + first_vmid_kfd+vmid_num_kfd; + node->vm_info.last_vmid_kfd = (i%2 == 0) ? + last_vmid_kfd-vmid_num_kfd : + last_vmid_kfd; + node->compute_vmid_bitmap = + ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) - + ((0x1 << (node->vm_info.first_vmid_kfd)) - 1); + } else { + node->vm_info.first_vmid_kfd = first_vmid_kfd; + node->vm_info.last_vmid_kfd = last_vmid_kfd; + node->compute_vmid_bitmap = + gpu_resources->compute_vmid_bitmap; + } + node->max_proc_per_quantum = max_proc_per_quantum; + atomic_set(&node->sram_ecc_flag, 0); + /* Initialize the KFD node */ + if (kfd_init_node(node)) { + dev_err(kfd_device, "Error initializing KFD node\n"); + goto node_init_error; + } + kfd->nodes[i] = node; } - kfd->node = node; if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; @@ -722,9 +795,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto out; kfd_resume_iommu_error: - kfd_cleanup_node(kfd); node_init_error: node_alloc_error: + kfd_cleanup_nodes(kfd, i); device_iommu_error: kfd_doorbell_fini(kfd); kfd_doorbell_error: @@ -742,7 +815,9 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - kfd_cleanup_node(kfd); + /* Cleanup KFD nodes */ + kfd_cleanup_nodes(kfd, kfd->num_nodes); + /* Cleanup common/shared resources */ kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); @@ -754,18 +829,23 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) int kgd2kfd_pre_reset(struct kfd_dev *kfd) { - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return 0; - kfd_smi_event_update_gpu_reset(node, false); - - node->dqm->ops.pre_reset(node->dqm); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + kfd_smi_event_update_gpu_reset(node, false); + node->dqm->ops.pre_reset(node->dqm); + } kgd2kfd_suspend(kfd, false); - kfd_signal_reset_event(node); + for (i = 0; i < 
kfd->num_nodes; i++) + kfd_signal_reset_event(kfd->nodes[i]); + return 0; } @@ -778,19 +858,25 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { int ret; - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return 0; - ret = kfd_resume(node); - if (ret) - return ret; + for (i = 0; i < kfd->num_nodes; i++) { + ret = kfd_resume(kfd->nodes[i]); + if (ret) + return ret; + } + atomic_dec(&kfd_locked); - atomic_set(&node->sram_ecc_flag, 0); - - kfd_smi_event_update_gpu_reset(node, true); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + atomic_set(&node->sram_ecc_flag, 0); + kfd_smi_event_update_gpu_reset(node, true); + } return 0; } @@ -802,7 +888,8 @@ bool kfd_is_locked(void) void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return; @@ -814,21 +901,25 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kfd_suspend_all_processes(); } - node->dqm->ops.stop(node->dqm); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + node->dqm->ops.stop(node->dqm); + } kfd_iommu_suspend(kfd); } int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { - int ret, count; - struct kfd_node *node = kfd->node; + int ret, count, i; if (!kfd->init_complete) return 0; - ret = kfd_resume(node); - if (ret) - return ret; + for (i = 0; i < kfd->num_nodes; i++) { + ret = kfd_resume(kfd->nodes[i]); + if (ret) + return ret; + } /* for runtime resume, skip unlocking kfd */ if (!run_pm) { @@ -892,10 +983,10 @@ static inline void kfd_queue_work(struct workqueue_struct *wq, /* This is called directly from KGD at ISR. */ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) { - uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; + uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i; bool is_patched = false; unsigned long flags; - struct kfd_node *node = kfd->node; + struct kfd_node *node; if (!kfd->init_complete) return; @@ -905,16 +996,22 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) return; } - spin_lock_irqsave(&node->interrupt_lock, flags); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + spin_lock_irqsave(&node->interrupt_lock, flags); - if (node->interrupts_active - && interrupt_is_wanted(node, ih_ring_entry, - patched_ihre, &is_patched) - && enqueue_ih_ring_entry(node, - is_patched ? patched_ihre : ih_ring_entry)) - kfd_queue_work(node->ih_wq, &node->interrupt_work); + if (node->interrupts_active + && interrupt_is_wanted(node, ih_ring_entry, + patched_ihre, &is_patched) + && enqueue_ih_ring_entry(node, + is_patched ? patched_ihre : ih_ring_entry)) { + kfd_queue_work(node->ih_wq, &node->interrupt_work); + spin_unlock_irqrestore(&node->interrupt_lock, flags); + return; + } + spin_unlock_irqrestore(&node->interrupt_lock, flags); + } - spin_unlock_irqrestore(&node->interrupt_lock, flags); } int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger) @@ -1181,8 +1278,13 @@ int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj) void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) { + /* + * TODO: Currently update SRAM ECC flag for first node. + * This needs to be updated later when we can + * identify SRAM ECC error on other nodes also. 
+ */ if (kfd) - atomic_inc(&kfd->node->sram_ecc_flag); + atomic_inc(&kfd->nodes[0]->sram_ecc_flag); } void kfd_inc_compute_active(struct kfd_node *node) @@ -1202,8 +1304,14 @@ void kfd_dec_compute_active(struct kfd_node *node) void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { + /* + * TODO: For now, raise the throttling event only on first node. + * This will need to change after we are able to determine + * which node raised the throttling event. + */ if (kfd && kfd->init_complete) - kfd_smi_event_update_thermal_throttling(kfd->node, throttle_bitmask); + kfd_smi_event_update_thermal_throttling(kfd->nodes[0], + throttle_bitmask); } /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 34977d89f01c..6ee17100c333 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1426,7 +1426,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) int i, mec; struct scheduling_resources res; - res.vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + res.vmid_mask = dqm->dev->compute_vmid_bitmap; res.queue_mask = 0; for (i = 0; i < KGD_MAX_QUEUES; ++i) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 6eee9a0944f3..808ee010520a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -121,6 +121,12 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) return -EINVAL; } + if (!kfd_is_first_node(dev)) { + dev_warn_once(kfd_device, + "IOMMU supported only on first node\n"); + return 0; + } + err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread); if (!err) pdd->bound = PDD_BOUND; @@ -138,7 +144,8 @@ void kfd_iommu_unbind_process(struct kfd_process *p) int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i]->bound == PDD_BOUND) + if ((p->pdds[i]->bound == PDD_BOUND) && + (kfd_is_first_node((p->pdds[i]->dev)))) amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev, p->pasid); } @@ -281,7 +288,7 @@ void kfd_iommu_suspend(struct kfd_dev *kfd) if (!kfd->use_iommu_v2) return; - kfd_unbind_processes_from_device(kfd->node); + kfd_unbind_processes_from_device(kfd->nodes[0]); amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); @@ -312,7 +319,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd) amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, iommu_invalid_ppr_cb); - err = kfd_bind_processes_to_device(kfd->node); + err = kfd_bind_processes_to_device(kfd->nodes[0]); if (err) { amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 1e187677c90a..5f4dc2a45bd0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -423,7 +423,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->node->id, prange->prefetch_loc, + 0, adev->kfd.dev->nodes[0]->id, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -456,7 +456,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range 
*prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->node->id, trigger); + 0, adev->kfd.dev->nodes[0]->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -701,7 +701,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->node->id, 0, prange->prefetch_loc, + adev->kfd.dev->nodes[0]->id, 0, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -737,7 +737,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->node->id, 0, trigger); + adev->kfd.dev->nodes[0]->id, 0, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index fdb97e5d0c01..873b49238dc1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -255,6 +255,8 @@ struct kfd_vmid_info { uint32_t vmid_num_kfd; }; +#define MAX_KFD_NODES 8 + struct kfd_dev; struct kfd_node { @@ -267,6 +269,10 @@ struct kfd_node { */ struct kfd_vmid_info vm_info; unsigned int id; /* topology stub index */ + unsigned int num_xcc_per_node; + unsigned int start_xcc_id; /* Starting XCC instance + * number for the node + */ /* Interrupts */ struct kfifo ih_fifo; struct workqueue_struct *ih_wq; @@ -300,6 +306,8 @@ struct kfd_node { /* Maximum process number mapped to HW scheduler */ unsigned int max_proc_per_quantum; + unsigned int compute_vmid_bitmap; + struct kfd_dev *kfd; }; @@ -368,7 +376,8 @@ struct kfd_dev { /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ struct dev_pagemap pgmap; - struct kfd_node *node; + struct kfd_node *nodes[MAX_KFD_NODES]; + unsigned int num_nodes; }; enum kfd_mempool { @@ -1397,6 +1406,11 @@ static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd) #endif } +static inline bool kfd_is_first_node(struct kfd_node *node) +{ + return (node == node->kfd->nodes[0]); +} + /* Debugfs */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index a0bf6558f4ac..b703da59e067 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -254,17 +254,17 @@ void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_START, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_START, "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, dev->node->id, write_fault ? 'W' : 'R'); + address, dev->nodes[0]->id, write_fault ? 'W' : 'R'); } void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, unsigned long address, bool migration) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_END, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_END, "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, dev->node->id, migration ? 'M' : 'U'); + pid, address, dev->nodes[0]->id, migration ? 
'M' : 'U'); } void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, @@ -273,7 +273,7 @@ void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_START, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_START, "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, prefetch_loc, preferred_loc, trigger); @@ -283,7 +283,7 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_END, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_END, "%lld -%d @%lx(%lx) %x->%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, trigger); @@ -292,16 +292,16 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_EVICTION, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_EVICTION, "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - dev->node->id, trigger); + dev->nodes[0]->id, trigger); } void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_RESTORE, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_RESTORE, "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - dev->node->id); + dev->nodes[0]->id); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -328,9 +328,9 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_UNMAP_FROM_GPU, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_UNMAP_FROM_GPU, "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, dev->node->id, trigger); + pid, address, last - address + 1, dev->nodes[0]->id, trigger); } int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 06a11186d947..94af37df3ed2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -555,7 +555,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); - + sysfs_show_32bit_prop(buffer, offs, "num_xcc", + dev->gpu->num_xcc_per_node); } return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute", @@ -1160,7 +1161,7 @@ void kfd_topology_shutdown(void) static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) { uint32_t hashout; - uint32_t buf[7]; + uint32_t buf[8]; uint64_t local_mem_size; int i; @@ -1177,8 +1178,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) buf[4] = gpu->adev->pdev->bus->number; buf[5] = lower_32_bits(local_mem_size); buf[6] = upper_32_bits(local_mem_size); + buf[7] = gpu->start_xcc_id | (gpu->num_xcc_per_node << 16); - for (i = 0, hashout = 0; i < 7; i++) + for (i = 0, hashout = 0; i < 8; i++) hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH); return hashout; From 2f77b9a242a2e01822efc80c8b63eaa31df0f8b4 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 21:45:50 -0400 Subject: 
[PATCH 273/861] drm/amdkfd: Update MQD management on multi XCC setup Update MQD management for both HIQ and user-mode compute queues on a multi XCC setup. MQDs need to be allocated, initialized, loaded and destroyed for each XCC in the KFD node. v2: squash in fix "drm/amdkfd: Fix SDMA+HIQ HQD allocation on GFX9.4.3" Signed-off-by: Mukul Joshi Signed-off-by: Amber Lin Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 51 ++- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 28 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 8 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 3 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 3 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 292 ++++++++++++++++-- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 3 + .../amd/amdkfd/kfd_process_queue_manager.c | 16 +- drivers/gpu/drm/amd/include/v9_structs.h | 30 +- 10 files changed, 380 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f61f07575f63..6bbe3b89aef5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -800,6 +800,41 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, sg_free_table(ttm->sg); } +/* + * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ... + * MQDn+CtrlStackn where n is the number of XCCs per partition. + * pages_per_xcc is the size of one MQD+CtrlStack. The first page of each + * chunk is the MQD and keeps the default memory type, UC. The remaining + * pages_per_xcc - 1 pages are the Ctrl stack, whose memory type is changed + * to NC. For example, with num_xcc = 4 and total_pages = 32, pages_per_xcc + * is 8, so pages 0, 8, 16 and 24 stay UC while all other pages become NC. + */ +static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev, + struct ttm_tt *ttm, uint64_t flags) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + uint64_t total_pages = ttm->num_pages; + int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp); + uint64_t page_idx, pages_per_xcc = total_pages / num_xcc; + int i; + uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | + AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); + + for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) { + /* MQD page: use default flags */ + amdgpu_gart_bind(adev, + gtt->offset + (page_idx << PAGE_SHIFT), + 1, &gtt->ttm.dma_address[page_idx], flags); + /* + * Ctrl pages - modify the memory type to NC (ctrl_flags) from + * the second page of the BO onward. + */ + amdgpu_gart_bind(adev, + gtt->offset + ((page_idx + 1) << PAGE_SHIFT), + pages_per_xcc - 1, + &gtt->ttm.dma_address[page_idx + 1], + ctrl_flags); + } +} + static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct ttm_buffer_object *tbo, uint64_t flags) @@ -812,21 +847,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, flags |= AMDGPU_PTE_TMZ; if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { - uint64_t page_idx = 1; - - amdgpu_gart_bind(adev, gtt->offset, page_idx, - gtt->ttm.dma_address, flags); - - /* The memory type of the first page defaults to UC. Now - * modify the memory type to NC from the second page of - * the BO onward. 
- */ - flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; - flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); - - amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT), - ttm->num_pages - page_idx, - &(gtt->ttm.dma_address[page_idx]), flags); + amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags); } else { amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, gtt->ttm.dma_address, flags); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 6ee17100c333..9afd3295ca85 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2247,7 +2247,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * get_num_all_sdma_engines(dqm) * dev->kfd->device_info.num_sdma_queues_per_engine + - dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; + (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * + dqm->dev->num_xcc_per_node); retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 61f6dd68c84b..074f6075ccc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -76,7 +76,8 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, q->sdma_queue_id) * dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; - offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; + offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * + dev->num_xcc_per_node; mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem + offset); @@ -246,3 +247,28 @@ bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd, { return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } + +uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev) +{ + return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; +} + +void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd_mem_obj, + uint32_t virtual_xcc_id) +{ + uint64_t offset; + + offset = kfd_hiq_mqd_stride(dev) * virtual_xcc_id; + + mqd_mem_obj->gtt_mem = (virtual_xcc_id == 0) ? 
+ dev->dqm->hiq_sdma_mqd.gtt_mem : NULL; + mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset; + mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t) + dev->dqm->hiq_sdma_mqd.cpu_ptr + offset); +} + +uint64_t kfd_mqd_stride(struct mqd_manager *mm, + struct queue_properties *q) +{ + return mm->mqd_size; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 46fc3f273d0d..eb18be74f559 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -119,6 +119,8 @@ struct mqd_manager { int (*debugfs_show_mqd)(struct seq_file *m, void *data); #endif uint32_t (*read_doorbell_id)(void *mqd); + uint64_t (*mqd_stride)(struct mqd_manager *mm, + struct queue_properties *p); struct mutex mqd_mutex; struct kfd_node *dev; @@ -164,4 +166,10 @@ bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); +void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, + struct kfd_mem_obj *mqd_mem_obj, uint32_t virtual_xcc_id); + +uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev); +uint64_t kfd_mqd_stride(struct mqd_manager *mm, + struct queue_properties *q); #endif /* KFD_MQD_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 03e04d5e5a11..ca1966466759 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -428,6 +428,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct cik_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -442,6 +443,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct cik_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -457,6 +459,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, mqd->checkpoint_mqd = checkpoint_mqd_sdma; mqd->restore_mqd = restore_mqd_sdma; mqd->mqd_size = sizeof(struct cik_sdma_rlc_registers); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 7a93be0ebb19..c9565ea99df5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -432,6 +432,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->get_wave_state = get_wave_state; mqd->checkpoint_mqd = checkpoint_mqd; mqd->restore_mqd = restore_mqd; + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -447,6 +448,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct v10_compute_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -478,6 +480,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->checkpoint_mqd = checkpoint_mqd_sdma; mqd->restore_mqd = restore_mqd_sdma; 
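/*
 * [Editorial illustration -- not part of the original patch] A minimal
 * sketch of how the per-XCC stride helpers introduced above are meant
 * to be used: each XCC's copy of the HIQ MQD sits at a fixed stride
 * from the shared hiq_sdma_mqd allocation, so instance i lives at
 * base + kfd_hiq_mqd_stride(dev) * i. Hypothetical usage, mirroring
 * init_mqd_hiq_v9_4_3() later in this patch:
 *
 *	struct kfd_mem_obj xcc_mqd;
 *	uint32_t xcc;
 *
 *	for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++)
 *		kfd_get_hiq_xcc_mqd(dev, &xcc_mqd, xcc);
 *
 * after which xcc_mqd.gpu_addr equals dev->dqm->hiq_sdma_mqd.gpu_addr
 * + kfd_hiq_mqd_stride(dev) * xcc for each iteration.
 */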
mqd->mqd_size = sizeof(struct v10_sdma_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 943a738e73f9..c677322057dd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -33,6 +33,21 @@ #include "sdma0/sdma0_4_0_sh_mask.h" #include "amdgpu_amdkfd.h" +static void update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, + struct mqd_update_info *minfo); + +static uint64_t mqd_stride_v9(struct mqd_manager *mm, + struct queue_properties *q) +{ + if (mm->dev->kfd->cwsr_enabled && + q->type == KFD_QUEUE_TYPE_COMPUTE) + return ALIGN(q->ctl_stack_size, PAGE_SIZE) + + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE); + + return mm->mqd_size; +} + static inline struct v9_mqd *get_mqd(void *mqd) { return (struct v9_mqd *)mqd; @@ -110,8 +125,9 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, if (!mqd_mem_obj) return NULL; retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev, - ALIGN(q->ctl_stack_size, PAGE_SIZE) + - ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), + (ALIGN(q->ctl_stack_size, PAGE_SIZE) + + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) * + node->num_xcc_per_node, &(mqd_mem_obj->gtt_mem), &(mqd_mem_obj->gpu_addr), (void *)&(mqd_mem_obj->cpu_ptr), true); @@ -165,24 +181,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | 1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; - if (q->format == KFD_QUEUE_FORMAT_AQL) { + if (q->format == KFD_QUEUE_FORMAT_AQL) m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; - if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { - /* On GC 9.4.3, DW 41 is re-purposed as - * compute_tg_chunk_size. 
- * TODO: review this setting when active CUs in the - * partition play a role - */ - m->compute_static_thread_mgmt_se6 = 1; - } - } else { - /* PM4 queue */ - if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { - m->compute_static_thread_mgmt_se6 = 0; - /* TODO: program pm4_target_xcc */ - } - } if (q->tba_addr) { m->compute_pgm_rsrc2 |= @@ -205,7 +206,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q, NULL); + update_mqd(mm, m, q, NULL); } static int load_mqd(struct mqd_manager *mm, void *mqd, @@ -269,13 +270,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_vmid = q->vmid; if (q->format == KFD_QUEUE_FORMAT_AQL) { - m->cp_hqd_pq_control |= + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK | 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT | 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT | 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT; - if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) - m->cp_hqd_pq_control |= - CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; m->cp_hqd_pq_doorbell_control |= 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } @@ -466,6 +464,224 @@ static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd, qp->is_active = 0; } +static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + struct v9_mqd *m; + int xcc = 0; + struct kfd_mem_obj xcc_mqd_mem_obj; + uint64_t xcc_gart_addr = 0; + + memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc); + + init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); + + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK | + 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT | + 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; + m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev); + if (xcc == 0) { + /* Set no_update_rptr = 0 in Master XCC */ + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + + /* Set the MQD pointer and gart address to XCC0 MQD */ + *mqd = m; + *gart_addr = xcc_gart_addr; + } + } +} + +static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + struct queue_properties *p, struct mm_struct *mms) +{ + int xcc, err; + void *xcc_mqd; + uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + hiq_mqd_size * xcc; + err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd, + pipe_id, queue_id, + p->doorbell_off); + if (err) { + pr_debug("Failed to load HIQ MQD for XCC: %d\n", xcc); + break; + } + } + + return err; +} + +static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int xcc = 0, err; + void *xcc_mqd; + uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + hiq_mqd_size * xcc; + err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, + type, timeout, pipe_id, + queue_id); + if (err) { + pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + break; + } + } + + return err; +} + +static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj, + struct kfd_mem_obj *xcc_mqd_mem_obj, + uint64_t offset) +{ + xcc_mqd_mem_obj->gtt_mem = (offset == 0) ? 
+ mqd_mem_obj->gtt_mem : NULL; + xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset; + xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr + + offset); +} + +static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + struct v9_mqd *m; + int xcc = 0; + struct kfd_mem_obj xcc_mqd_mem_obj; + uint64_t xcc_gart_addr = 0; + uint64_t offset = mm->mqd_stride(mm, q); + + memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc); + + init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); + + m->cp_mqd_stride_size = offset; + if (q->format == KFD_QUEUE_FORMAT_AQL) { + m->compute_tg_chunk_size = 1; + + switch (xcc) { + case 0: + /* Master XCC */ + m->cp_hqd_pq_control &= + ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + m->compute_current_logic_xcc_id = + mm->dev->num_xcc_per_node - 1; + break; + default: + m->compute_current_logic_xcc_id = + xcc - 1; + break; + } + } else { + /* PM4 Queue */ + m->compute_current_logic_xcc_id = 0; + m->compute_tg_chunk_size = 0; + } + + if (xcc == 0) { + /* Set the MQD pointer and gart address to XCC0 MQD */ + *mqd = m; + *gart_addr = xcc_gart_addr; + } + } +} + +static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, struct mqd_update_info *minfo) +{ + struct v9_mqd *m; + int xcc = 0; + uint64_t size = mm->mqd_stride(mm, q); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + m = get_mqd(mqd + size * xcc); + update_mqd(mm, m, q, minfo); + + if (q->format == KFD_QUEUE_FORMAT_AQL) { + switch (xcc) { + case 0: + /* Master XCC */ + m->cp_hqd_pq_control &= + ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + m->compute_current_logic_xcc_id = + mm->dev->num_xcc_per_node - 1; + break; + default: + m->compute_current_logic_xcc_id = + xcc - 1; + break; + } + m->compute_tg_chunk_size = 1; + } else { + /* PM4 Queue */ + m->compute_current_logic_xcc_id = 0; + m->compute_tg_chunk_size = 0; + } + } +} + +static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int xcc = 0, err; + void *xcc_mqd; + struct v9_mqd *m; + uint64_t mqd_offset; + + m = get_mqd(mqd); + mqd_offset = m->cp_mqd_stride_size; + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + mqd_offset * xcc; + err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, + type, timeout, pipe_id, + queue_id); + if (err) { + pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + break; + } + } + + return err; +} + +static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + struct queue_properties *p, struct mm_struct *mms) +{ + /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ + uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); + int xcc = 0, err; + void *xcc_mqd; + uint64_t mqd_stride_size = mm->mqd_stride(mm, p); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + mqd_stride_size * xcc; + err = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, xcc_mqd, + pipe_id, queue_id, + (uint32_t __user *)p->write_ptr, + wptr_shift, 0, mms); + if (err) { + pr_debug("Load MQD failed for xcc: %d\n", xcc); + break; + } + } + + return err; +} + #if defined(CONFIG_DEBUG_FS) static int debugfs_show_mqd(struct seq_file *m, void *data) @@ -501,34 +717,49 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: mqd->allocate_mqd = allocate_mqd; - mqd->init_mqd = init_mqd; mqd->free_mqd = kfd_free_mqd_cp; - mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd; - mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->get_wave_state = get_wave_state; mqd->get_checkpoint_info = get_checkpoint_info; mqd->checkpoint_mqd = checkpoint_mqd; mqd->restore_mqd = restore_mqd; mqd->mqd_size = sizeof(struct v9_mqd); + mqd->mqd_stride = mqd_stride_v9; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif + if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) { + mqd->init_mqd = init_mqd_v9_4_3; + mqd->load_mqd = load_mqd_v9_4_3; + mqd->update_mqd = update_mqd_v9_4_3; + mqd->destroy_mqd = destroy_mqd_v9_4_3; + } else { + mqd->init_mqd = init_mqd; + mqd->load_mqd = load_mqd; + mqd->update_mqd = update_mqd; + mqd->destroy_mqd = kfd_destroy_mqd_cp; + } break; case KFD_MQD_TYPE_HIQ: mqd->allocate_mqd = allocate_hiq_mqd; - mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd_hiq_sdma; - mqd->load_mqd = kfd_hiq_load_mqd_kiq; mqd->update_mqd = update_mqd; - mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct v9_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif mqd->read_doorbell_id = read_doorbell_id; + if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) { + mqd->init_mqd = init_mqd_hiq_v9_4_3; + mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3; + mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3; + } else { + mqd->init_mqd = init_mqd_hiq; + mqd->load_mqd = kfd_hiq_load_mqd_kiq; + mqd->destroy_mqd = kfd_destroy_mqd_cp; + } break; case KFD_MQD_TYPE_DIQ: mqd->allocate_mqd = allocate_mqd; @@ -554,6 +785,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->checkpoint_mqd = checkpoint_mqd_sdma; mqd->restore_mqd = restore_mqd_sdma; mqd->mqd_size = sizeof(struct v9_sdma_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index f6b4a5686dcb..8736a3cdbe1e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -486,6 +486,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct vi_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -500,6 +501,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct vi_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if 
defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -515,6 +517,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, mqd->checkpoint_mqd = checkpoint_mqd_sdma; mqd->restore_mqd = restore_mqd_sdma; mqd->mqd_size = sizeof(struct vi_sdma_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 5602498e713f..b1fb017b2ef8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -927,7 +927,9 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) struct queue *q; enum KFD_MQD_TYPE mqd_type; struct mqd_manager *mqd_mgr; - int r = 0; + int r = 0, xcc, num_xccs = 1; + void *mqd; + uint64_t size = 0; list_for_each_entry(pqn, &pqm->queues, process_queue_list) { if (pqn->q) { @@ -943,6 +945,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) seq_printf(m, " Compute queue on device %x\n", q->device->id); mqd_type = KFD_MQD_TYPE_CP; + num_xccs = q->device->num_xcc_per_node; break; default: seq_printf(m, @@ -951,6 +954,8 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) continue; } mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type]; + size = mqd_mgr->mqd_stride(mqd_mgr, + &q->properties); } else if (pqn->kq) { q = pqn->kq->queue; mqd_mgr = pqn->kq->mqd_mgr; @@ -972,9 +977,12 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) continue; } - r = mqd_mgr->debugfs_show_mqd(m, q->mqd); - if (r != 0) - break; + for (xcc = 0; xcc < num_xccs; xcc++) { + mqd = q->mqd + size * xcc; + r = mqd_mgr->debugfs_show_mqd(m, mqd); + if (r != 0) + break; + } } return r; diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h index a0c672889fe4..a2f81b9c38af 100644 --- a/drivers/gpu/drm/amd/include/v9_structs.h +++ b/drivers/gpu/drm/amd/include/v9_structs.h @@ -196,10 +196,20 @@ struct v9_mqd { uint32_t compute_wave_restore_addr_lo; uint32_t compute_wave_restore_addr_hi; uint32_t compute_wave_restore_control; - uint32_t compute_static_thread_mgmt_se4; - uint32_t compute_static_thread_mgmt_se5; - uint32_t compute_static_thread_mgmt_se6; - uint32_t compute_static_thread_mgmt_se7; + union { + struct { + uint32_t compute_static_thread_mgmt_se4; + uint32_t compute_static_thread_mgmt_se5; + uint32_t compute_static_thread_mgmt_se6; + uint32_t compute_static_thread_mgmt_se7; + }; + struct { + uint32_t compute_current_logic_xcc_id; // offset: 39 (0x27) + uint32_t compute_restart_cg_tg_id; // offset: 40 (0x28) + uint32_t compute_tg_chunk_size; // offset: 41 (0x29) + uint32_t compute_restore_tg_chunk_size; // offset: 42 (0x2A) + }; + }; uint32_t reserved_43; uint32_t reserved_44; uint32_t reserved_45; @@ -382,8 +392,16 @@ struct v9_mqd { uint32_t iqtimer_pkt_dw29; uint32_t iqtimer_pkt_dw30; uint32_t iqtimer_pkt_dw31; - uint32_t reserved_225; - uint32_t reserved_226; + union { + struct { + uint32_t reserved_225; + uint32_t reserved_226; + }; + struct { + uint32_t pm4_target_xcc_in_xcp; // offset: 225 (0xE1) + uint32_t cp_mqd_stride_size; // offset: 226 (0xE2) + }; + }; uint32_t reserved_227; uint32_t set_resources_header; uint32_t set_resources_dw1; From 3c8bdb51be0e895010da62dfa173bb1227ff3b6f Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 21:50:43 -0400 Subject: [PATCH 274/861] drm/amdkfd: Add PM4 target XCC In a device that supports multiple XCCs, unlike 
AQL queues, the PM4 queue will only be processed on one XCC within the partition. This patch repurposes the queue percentage variable in the create queue and update queue ioctls so that user space can specify the target XCC: bits 0-7 carry the queue percentage and bits 8-15 carry the PM4 target XCC (for example, args->queue_percentage = (2 << 8) | 100 encodes a queue percentage of 100 with PM4 target XCC 2). Signed-off-by: Amber Lin Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 22 +++++++++++++++---- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2 ++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 + .../amd/amdkfd/kfd_process_queue_manager.c | 1 + 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index eb0b0b38f10e..45e8da125f70 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -186,7 +186,12 @@ static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, static int set_queue_properties_from_user(struct queue_properties *q_properties, struct kfd_ioctl_create_queue_args *args) { - if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { + /* + * Repurpose queue percentage to accommodate new features: + * bits 0-7: queue percentage + * bits 8-15: pm4_target_xcc + */ + if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) { pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); return -EINVAL; } @@ -236,7 +241,9 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, q_properties->is_interop = false; q_properties->is_gws = false; - q_properties->queue_percent = args->queue_percentage; + q_properties->queue_percent = args->queue_percentage & 0xFF; + /* bits 8-15 are repurposed to be the PM4 target XCC */ + q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF; q_properties->priority = args->queue_priority; q_properties->queue_address = args->ring_base_address; q_properties->queue_size = args->ring_size; @@ -442,7 +449,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, struct kfd_ioctl_update_queue_args *args = data; struct queue_properties properties; - if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { + /* + * Repurpose queue percentage to accommodate new features: + * bits 0-7: queue percentage + * bits 8-15: pm4_target_xcc + */ + if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) { pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); return -EINVAL; } @@ -466,7 +478,9 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, properties.queue_address = args->ring_base_address; properties.queue_size = args->ring_size; - properties.queue_percent = args->queue_percentage; + properties.queue_percent = args->queue_percentage & 0xFF; + /* bits 8-15 are repurposed to be the PM4 target XCC */ + properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF; properties.priority = args->queue_priority; pr_debug("Updating queue id %d for pasid 0x%x\n", diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index c677322057dd..b46c984b3a17 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -587,6 +587,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, /* PM4 Queue */ m->compute_current_logic_xcc_id = 0; m->compute_tg_chunk_size = 0; + m->pm4_target_xcc_in_xcp = q->pm4_target_xcc; } if (xcc == 0) { @@ -627,6 +628,7 @@ static void 
update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, /* PM4 Queue */ m->compute_current_logic_xcc_id = 0; m->compute_tg_chunk_size = 0; + m->pm4_target_xcc_in_xcp = q->pm4_target_xcc; } } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 873b49238dc1..1337fcdf8958 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -509,6 +509,7 @@ struct queue_properties { bool is_evicted; bool is_active; bool is_gws; + uint32_t pm4_target_xcc; /* Not relevant for user mode queues in cp scheduling */ unsigned int vmid; /* Relevant only for sdma queues*/ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index b1fb017b2ef8..2b2ae0c9902b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -477,6 +477,7 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm, pqn->q->properties.queue_size = p->queue_size; pqn->q->properties.queue_percent = p->queue_percent; pqn->q->properties.priority = p->priority; + pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc; retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, pqn->q, NULL); From e2069a7b0880ccdc6fa6530b6091e47168705425 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:20:52 -0400 Subject: [PATCH 275/861] drm/amdkfd: Add XCC instance to kgd2kfd interface (v3) Gfx 9 starts to have multiple XCC instances in one device. Add instance parameter to kgd2kfd functions where XCC instance was hard coded as 0. Also, update code to pass the correct instance number when running on a multi-XCC setup. v2: introduce the XCC instance to gfx v11 (Morris) v3: rebase (Alex) Signed-off-by: Amber Lin Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Tested-by: Amber Lin Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 38 ++--- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 22 +-- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 27 +-- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 19 ++- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 17 +- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 17 +- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 160 +++++++++--------- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 29 ++-- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 111 +++++++----- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 6 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 2 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 2 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 2 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 15 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2 +- .../gpu/drm/amd/include/kgd_kfd_interface.h | 25 +-- 17 files changed, 274 insertions(+), 222 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 562e1a04160f..49d8087e469e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -33,7 +33,7 @@ #include "soc15.h" static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, - u32 pasid, unsigned int vmid) + u32 pasid, unsigned int vmid, uint32_t inst) { unsigned long timeout; @@ -47,11 +47,11 @@ static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, uint32_t 
pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid | ATC_VMID0_PASID_MAPPING__VALID_MASK; - WREG32(SOC15_REG_OFFSET(ATHUB, 0, + WREG32(SOC15_REG_OFFSET(ATHUB, inst, regATC_VMID0_PASID_MAPPING) + vmid, pasid_mapping); timeout = jiffies + msecs_to_jiffies(10); - while (!(RREG32(SOC15_REG_OFFSET(ATHUB, 0, + while (!(RREG32(SOC15_REG_OFFSET(ATHUB, inst, regATC_VMID_PASID_MAPPING_UPDATE_STATUS)) & (1U << vmid))) { if (time_after(jiffies, timeout)) { @@ -61,13 +61,13 @@ static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, cpu_relax(); } - WREG32(SOC15_REG_OFFSET(ATHUB, 0, + WREG32(SOC15_REG_OFFSET(ATHUB, inst, regATC_VMID_PASID_MAPPING_UPDATE_STATUS), 1U << vmid); - WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, + WREG32(SOC15_REG_OFFSET(OSSSYS, inst, mmIH_VMID_0_LUT) + vmid, pasid_mapping); - WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid, + WREG32(SOC15_REG_OFFSET(OSSSYS, inst, mmIH_VMID_0_LUT_MM) + vmid, pasid_mapping); return 0; @@ -81,7 +81,7 @@ static inline struct v9_mqd *get_mqd(void *mqd) static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct v9_mqd *m; uint32_t *mqd_hqd; @@ -89,12 +89,12 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, m = get_mqd(mqd); - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); /* HQD registers extend to CP_HQD_AQL_DISPATCH_ID_HI */ mqd_hqd = &m->cp_mqd_base_addr_lo; - hqd_base = SOC15_REG_OFFSET(GC, 0, regCP_MQD_BASE_ADDR); - hqd_end = SOC15_REG_OFFSET(GC, 0, regCP_HQD_AQL_DISPATCH_ID_HI); + hqd_base = SOC15_REG_OFFSET(GC, inst, regCP_MQD_BASE_ADDR); + hqd_end = SOC15_REG_OFFSET(GC, inst, regCP_HQD_AQL_DISPATCH_ID_HI); for (reg = hqd_base; reg <= hqd_end; reg++) WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); @@ -103,7 +103,7 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, /* Activate doorbell logic before triggering WPTR poll. 
*/ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_DOORBELL_CONTROL), data); if (wptr) { @@ -133,29 +133,29 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_LO), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_LO), lower_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_HI), upper_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_POLL_ADDR), lower_32_bits((uintptr_t)wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, regCP_PQ_WPTR_POLL_CNTL1), + WREG32(SOC15_REG_OFFSET(GC, inst, regCP_PQ_WPTR_POLL_CNTL1), (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_EOP_RPTR), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_EOP_RPTR), REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE), data); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_ACTIVE), data); - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index f599e1e74fcc..7b60268d93c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -79,7 +79,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -91,7 +91,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi } static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { /* * We have to assume that there is no outstanding mapping. 
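/*
 * [Editorial illustration -- hypothetical, not from the patch] With the
 * inst argument added throughout, a KFD-side caller can address each
 * XCC of a partition explicitly instead of relying on a hard-coded
 * instance 0. A minimal sketch, assuming a struct kfd_node *node with
 * the num_xcc_per_node and start_xcc_id fields introduced earlier in
 * this series:
 *
 *	for (xcc = 0; xcc < node->num_xcc_per_node; xcc++)
 *		node->kfd2kgd->hqd_load(node->adev, xcc_mqd, pipe_id,
 *					queue_id, wptr, wptr_shift, 0, mms,
 *					node->start_xcc_id + xcc);
 */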
@@ -135,7 +135,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, * but still works */ -static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -205,7 +206,7 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct v10_compute_mqd *m; uint32_t *mqd_hqd; @@ -286,7 +287,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off) + uint32_t doorbell_off, uint32_t inst) { struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v10_compute_mqd *m; @@ -338,7 +339,7 @@ out_unlock: static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS 56 @@ -469,7 +470,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -510,7 +511,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { enum hqd_dequeue_request_type type; unsigned long end_jiffies; @@ -673,7 +674,7 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; @@ -709,7 +710,8 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev, } static void program_trap_handler_settings(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, + uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 5c4152ae44da..52d0d35fb58d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -80,7 +80,7 @@ static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t v uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -93,7 +93,7 @@ static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t v /* ATC is defeatured on Sienna_Cichlid */ static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; @@ -105,7 +105,8 @@ static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int return 0; } -static int 
init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id) +static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -177,7 +178,7 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct v10_compute_mqd *m; uint32_t *mqd_hqd; @@ -273,7 +274,7 @@ static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off) + uint32_t doorbell_off, uint32_t inst) { struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v10_compute_mqd *m; @@ -325,7 +326,7 @@ out_unlock: static int hqd_dump_v10_3(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS 56 @@ -456,7 +457,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev, static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -498,7 +499,7 @@ static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev, static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { enum hqd_dequeue_request_type type; unsigned long end_jiffies; @@ -586,7 +587,7 @@ static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd, static int wave_control_execute_v10_3(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; @@ -628,7 +629,8 @@ static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev, } static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, + uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -765,7 +767,7 @@ uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev, * deq_retry_wait_time -- Wait Count for Global Wave Syncs. 
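 *
 * [Editorial note] This series also threads the XCC instance through
 * this helper; gfx v10.3 exposes a single GC instance, so the register
 * read below keeps addressing instance 0 and the new inst argument
 * exists only for kgd2kfd interface uniformity.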
*/ void get_iq_wait_times_v10_3(struct amdgpu_device *adev, - uint32_t *wait_times) + uint32_t *wait_times, uint32_t inst) { *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); @@ -775,7 +777,8 @@ void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data) + uint32_t *reg_data, + uint32_t inst) { *reg_data = wait_times; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 5cdb7289d35b..7deff8a547fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -78,7 +78,7 @@ static void program_sh_mem_settings_v11(struct amdgpu_device *adev, uint32_t vmi uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -89,7 +89,7 @@ static void program_sh_mem_settings_v11(struct amdgpu_device *adev, uint32_t vmi } static int set_pasid_vmid_mapping_v11(struct amdgpu_device *adev, unsigned int pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; @@ -101,7 +101,8 @@ static int set_pasid_vmid_mapping_v11(struct amdgpu_device *adev, unsigned int p return 0; } -static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id) +static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -162,7 +163,7 @@ static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd) static int hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) + struct mm_struct *mm, uint32_t inst) { struct v11_compute_mqd *m; uint32_t *mqd_hqd; @@ -258,7 +259,7 @@ static int hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off) + uint32_t doorbell_off, uint32_t inst) { struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v11_compute_mqd *m; @@ -310,7 +311,7 @@ out_unlock: static int hqd_dump_v11(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS 56 @@ -445,7 +446,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev, } static bool hqd_is_occupied_v11(struct amdgpu_device *adev, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) + uint32_t pipe_id, uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -486,7 +487,7 @@ static bool hqd_sdma_is_occupied_v11(struct amdgpu_device *adev, void *mqd) static int hqd_destroy_v11(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { enum hqd_dequeue_request_type type; unsigned long end_jiffies; @@ -571,7 +572,7 @@ static int hqd_sdma_destroy_v11(struct amdgpu_device *adev, void *mqd, static int wave_control_execute_v11(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index e83cb1c09610..6bf448ab3dff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -78,7 +78,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -91,7 +91,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi } static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { /* * We have to assume that there is no outstanding mapping. @@ -114,7 +114,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, return 0; } -static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -158,7 +159,7 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct cik_mqd *m; uint32_t *mqd_hqd; @@ -202,7 +203,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS (35+4) @@ -318,7 +319,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -358,7 +359,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t temp; enum hqd_dequeue_request_type type; @@ -494,7 +495,7 @@ static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 870f352837fc..cd06e4a6d1da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -72,7 +72,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -85,7 +85,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi } static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { /* * We have to assume that there is no outstanding mapping. 
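/*
 * [Editorial note -- observation, not part of the patch] As on the
 * other single-XCC backends touched by this patch (gfx v7, v10 and
 * v11), the new inst parameter is added to these gfx v8 hooks purely
 * to keep the kgd2kfd signatures uniform; these generations have one
 * XCC, so the register programming continues to address instance 0.
 */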
@@ -109,7 +109,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, return 0; } -static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -153,7 +154,7 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd) static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct vi_mqd *m; uint32_t *mqd_hqd; @@ -226,7 +227,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS (54+4) @@ -350,7 +351,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -390,7 +391,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t temp; enum hqd_dequeue_request_type type; @@ -540,7 +541,7 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index d36219ecd3dd..7918a00cbb5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -46,26 +46,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, - uint32_t queue, uint32_t vmid) +static void kgd_gfx_v9_lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, + uint32_t queue, uint32_t vmid, uint32_t inst) { mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, mec, pipe, queue, vmid, 0); + soc15_grbm_select(adev, mec, pipe, queue, vmid, inst); } -static void unlock_srbm(struct amdgpu_device *adev) +static void kgd_gfx_v9_unlock_srbm(struct amdgpu_device *adev, uint32_t inst) { - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, inst); mutex_unlock(&adev->srbm_mutex); } void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(adev, mec, pipe, queue_id, 0); + kgd_gfx_v9_lock_srbm(adev, mec, pipe, queue_id, 0, inst); } uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, @@ -77,28 +77,28 @@ uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -void kgd_gfx_v9_release_queue(struct amdgpu_device *adev) +void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst) { - unlock_srbm(adev); + 
kgd_gfx_v9_unlock_srbm(adev, inst); } void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { - lock_srbm(adev, 0, 0, 0, vmid); + kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmSH_MEM_CONFIG), sh_mem_config); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmSH_MEM_BASES), sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(adev); + kgd_gfx_v9_unlock_srbm(adev, inst); } int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { /* * We have to assume that there is no outstanding mapping. @@ -156,7 +156,8 @@ int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, * but still works */ -int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -164,13 +165,13 @@ int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(adev, mec, pipe, 0, 0); + kgd_gfx_v9_lock_srbm(adev, mec, pipe, 0, 0, inst); - WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, + WREG32_SOC15(GC, inst, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(adev); + kgd_gfx_v9_unlock_srbm(adev, inst); return 0; } @@ -220,7 +221,8 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, + uint32_t inst) { struct v9_mqd *m; uint32_t *mqd_hqd; @@ -228,21 +230,22 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, m = get_mqd(mqd); - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; - hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); + hqd_base = SOC15_REG_OFFSET(GC, inst, mmCP_MQD_BASE_ADDR); for (reg = hqd_base; - reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + reg <= SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI); reg++) WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); /* Activate doorbell logic before triggering WPTR poll. 
*/ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_DOORBELL_CONTROL), + data); if (wptr) { /* Don't read wptr with get_user because the user @@ -271,43 +274,43 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_LO), lower_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI), upper_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_POLL_ADDR), lower_32_bits((uintptr_t)wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_EOP_RPTR), REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_ACTIVE), data); - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return 0; } int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off) + uint32_t doorbell_off, uint32_t inst) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring; struct v9_mqd *m; uint32_t mec, pipe; int r; m = get_mqd(mqd); - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -315,7 +318,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq[0].ring_lock); + spin_lock(&adev->gfx.kiq[inst].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -342,15 +345,15 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq[0].ring_lock); - kgd_gfx_v9_release_queue(adev); + spin_unlock(&adev->gfx.kiq[inst].ring_lock); + kgd_gfx_v9_release_queue(adev, inst); return r; } int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS 56 @@ -365,13 +368,13 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, if (*dump == NULL) return -ENOMEM; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); - for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); - reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + 
for (reg = SOC15_REG_OFFSET(GC, inst, mmCP_MQD_BASE_ADDR); + reg <= SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -481,23 +484,23 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; uint32_t low, high; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); - act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); + act = RREG32_SOC15(GC, inst, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); high = upper_32_bits(queue_address >> 8); - if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) && - high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) + if (low == RREG32_SOC15(GC, inst, mmCP_HQD_PQ_BASE) && + high == RREG32_SOC15(GC, inst, mmCP_HQD_PQ_BASE_HI)) retval = true; } - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return retval; } @@ -522,7 +525,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { enum hqd_dequeue_request_type type; unsigned long end_jiffies; @@ -532,10 +535,10 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); if (m->cp_hqd_vmid == 0) - WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); + WREG32_FIELD15_RLC(GC, inst, RLC_CP_SCHEDULERS, scheduler1, 0); switch (reset_type) { case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: @@ -552,22 +555,22 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, break; } - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_DEQUEUE_REQUEST), type); end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { - temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); + temp = RREG32_SOC15(GC, inst, mmCP_HQD_ACTIVE); if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return -ETIME; } usleep_range(500, 1000); } - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return 0; } @@ -624,14 +627,14 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); - WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val); - WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd); + WREG32_SOC15_RLC_SHADOW(GC, inst, mmGRBM_GFX_INDEX, gfx_index_val); + WREG32_SOC15(GC, inst, mmSQ_CMD, sq_cmd); data = REG_SET_FIELD(data, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); @@ -640,7 +643,7 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data); + 
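/* [Editorial note, not part of the original patch: 'data' was rebuilt
 * above with the SE/SH/instance broadcast bits set, so this write
 * restores GRBM_GFX_INDEX on the selected XCC to broadcast mode once
 * the SQ command has been issued.]
 */
+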
WREG32_SOC15_RLC_SHADOW(GC, inst, mmGRBM_GFX_INDEX, data); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -685,7 +688,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) * is being collected */ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, - int *wave_cnt, int *vmid) + int *wave_cnt, int *vmid, uint32_t inst) { int pipe_idx; int queue_slot; @@ -700,12 +703,12 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, *wave_cnt = 0; pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe; queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe; - soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, 0); - reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + + soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst); + reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot); *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; if (*wave_cnt != 0) - *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) & + *vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) & CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT; } @@ -756,7 +759,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * Reading registers referenced above involves programming GRBM appropriately */ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, - int *pasid_wave_cnt, int *max_waves_per_cu) + int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst) { int qidx; int vmid; @@ -772,7 +775,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES); lock_spi_csq_mutexes(adev); - soc15_grbm_select(adev, 1, 0, 0, 0, 0); + soc15_grbm_select(adev, 1, 0, 0, 0, inst); /* * Iterate through the shader engines and arrays of the device @@ -787,8 +790,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, for (se_idx = 0; se_idx < se_cnt; se_idx++) { for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { - amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, 0); - queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS); + amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst); + queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS); /* * Assumption: queue map encodes following schema: four @@ -808,10 +811,11 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, continue; /* Get number of waves in flight and aggregate them */ - get_wave_count(adev, qidx, &wave_cnt, &vmid); + get_wave_count(adev, qidx, &wave_cnt, &vmid, + inst); if (wave_cnt != 0) { pasid_tmp = - RREG32(SOC15_REG_OFFSET(OSSSYS, 0, + RREG32(SOC15_REG_OFFSET(OSSSYS, inst, mmIH_VMID_0_LUT) + vmid); if (pasid_tmp == pasid) vmid_wave_cnt += wave_cnt; @@ -820,8 +824,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst); + soc15_grbm_select(adev, 0, 0, 0, 0, inst); unlock_spi_csq_mutexes(adev); /* Update the output parameters and return */ @@ -831,27 +835,27 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, } void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst) { - lock_srbm(adev, 0, 0, 0, vmid); + 
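/* [Editorial note, not part of the original patch: SRBM is selected
 * with the target VMID on the given XCC instance, so the per-VMID
 * SQ_SHADER_TBA/TMA writes below land in that XCC's register space.]
 */
+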
kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst); /* * Program TBA registers */ - WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_LO, + WREG32_SOC15(GC, inst, mmSQ_SHADER_TBA_LO, lower_32_bits(tba_addr >> 8)); - WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_HI, + WREG32_SOC15(GC, inst, mmSQ_SHADER_TBA_HI, upper_32_bits(tba_addr >> 8)); /* * Program TMA registers */ - WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_LO, + WREG32_SOC15(GC, inst, mmSQ_SHADER_TMA_LO, lower_32_bits(tma_addr >> 8)); - WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_HI, + WREG32_SOC15(GC, inst, mmSQ_SHADER_TMA_HI, upper_32_bits(tma_addr >> 8)); - unlock_srbm(adev); + kgd_gfx_v9_unlock_srbm(adev, inst); } const struct kfd2kgd_calls gfx_v9_kfd2kgd = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 491273a02e30..a241299f4fbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -25,41 +25,42 @@ void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); + uint32_t sh_mem_bases, uint32_t inst); int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid); -int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id); + unsigned int vmid, uint32_t inst); +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst); int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); + struct mm_struct *mm, uint32_t inst); int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off); + uint32_t doorbell_off, uint32_t inst); int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst); bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd); + uint32_t sq_cmd, uint32_t inst); bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); - void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, - int *pasid_wave_cnt, int *max_waves_per_cu); + int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst); void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, + uint32_t inst); void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id); -void kgd_gfx_v9_release_queue(struct amdgpu_device *adev); +void 
kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 9afd3295ca85..33a9394f9e58 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -127,12 +127,16 @@ static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manag void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - return dqm->dev->kfd2kgd->program_sh_mem_settings( + int xcc = 0; + + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + dqm->dev->kfd2kgd->program_sh_mem_settings( dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, - qpd->sh_mem_bases); + qpd->sh_mem_bases, + dqm->dev->start_xcc_id + xcc); } static void kfd_hws_hang(struct device_queue_manager *dqm) @@ -405,10 +409,14 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, static void program_trap_handler_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { + int xcc = 0; + if (dqm->dev->kfd2kgd->program_trap_handler_settings) - dqm->dev->kfd2kgd->program_trap_handler_settings( + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + dqm->dev->kfd2kgd->program_trap_handler_settings( dqm->dev->adev, qpd->vmid, - qpd->tba_addr, qpd->tma_addr); + qpd->tba_addr, qpd->tma_addr, + dqm->dev->start_xcc_id + xcc); } static int allocate_vmid(struct device_queue_manager *dqm, @@ -671,6 +679,7 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process struct kfd_process_device *pdd; int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; + int xcc = 0; reg_sq_cmd.u32All = 0; reg_gfx_index.u32All = 0; @@ -715,9 +724,11 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL; reg_sq_cmd.bits.vm_id = vmid; - dev->kfd2kgd->wave_control_execute(dev->adev, + for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) + dev->kfd2kgd->wave_control_execute(dev->adev, reg_gfx_index.u32All, - reg_sq_cmd.u32All); + reg_sq_cmd.u32All, + dev->start_xcc_id + xcc); return 0; } @@ -1229,17 +1240,32 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) { - return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( - dqm->dev->adev, pasid, vmid); + int xcc = 0, ret; + + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { + ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping( + dqm->dev->adev, pasid, vmid, + dqm->dev->start_xcc_id + xcc); + if (ret) + break; + } + + return ret; } static void init_interrupts(struct device_queue_manager *dqm) { - unsigned int i; + unsigned int i, xcc; - for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) - if (is_pipe_enabled(dqm, 0, i)) - dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i); + for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { + if (is_pipe_enabled(dqm, 0, i)) { + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + dqm->dev->kfd2kgd->init_interrupts( + dqm->dev->adev, i, + dqm->dev->start_xcc_id + + xcc); + } + } } static void init_sdma_bitmaps(struct device_queue_manager *dqm) @@ -2455,45 +2481,50 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) struct device_queue_manager *dqm = data; uint32_t (*dump)[2], n_regs; int pipe, queue; - int r = 0; + int r = 0, xcc; + uint32_t inst; if (!dqm->sched_running) { seq_puts(m, 
" Device is stopped\n"); return 0; } - r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { + inst = dqm->dev->start_xcc_id + xcc; + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, - &dump, &n_regs); - if (!r) { - seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n", - KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, - KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), - KFD_CIK_HIQ_QUEUE); - seq_reg_dump(m, dump, n_regs); - - kfree(dump); - } - - for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { - int pipe_offset = pipe * get_queues_per_pipe(dqm); - - for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { - if (!test_bit(pipe_offset + queue, - dqm->dev->kfd->shared_resources.cp_queue_bitmap)) - continue; - - r = dqm->dev->kfd2kgd->hqd_dump( - dqm->dev->adev, pipe, queue, &dump, &n_regs); - if (r) - break; - - seq_printf(m, " CP Pipe %d, Queue %d\n", - pipe, queue); + &dump, &n_regs, inst); + if (!r) { + seq_printf(m, + " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n", + inst, KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, + KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), + KFD_CIK_HIQ_QUEUE); seq_reg_dump(m, dump, n_regs); kfree(dump); } + + for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { + int pipe_offset = pipe * get_queues_per_pipe(dqm); + + for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { + if (!test_bit(pipe_offset + queue, + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) + continue; + + r = dqm->dev->kfd2kgd->hqd_dump( + dqm->dev->adev, pipe, queue, &dump, &n_regs, inst); + if (r) + break; + + seq_printf(m, " Inst %d, CP Pipe %d, Queue %d\n", + inst, pipe, queue); + seq_reg_dump(m, dump, n_regs); + + kfree(dump); + } + } } for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 074f6075ccc7..d81125421aaf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -190,7 +190,7 @@ int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, struct queue_properties *p, struct mm_struct *mms) { return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, - queue_id, p->doorbell_off); + queue_id, p->doorbell_off, 0); } int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd, @@ -198,7 +198,7 @@ int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout, - pipe_id, queue_id); + pipe_id, queue_id, 0); } void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd, @@ -217,7 +217,7 @@ bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address, - pipe_id, queue_id); + pipe_id, queue_id, 0); } int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index ca1966466759..eb11940bec34 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -167,7 +167,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, wptr_mask, mms); + wptr_shift, wptr_mask, mms, 0); } static void __update_mqd(struct mqd_manager *mm, void *mqd, diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index c9565ea99df5..d54c6fdebbb6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -151,7 +151,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms); + wptr_shift, 0, mms, 0); return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index dff171b54b5c..338d639c1898 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -184,7 +184,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms); + wptr_shift, 0, mms, 0); return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index b46c984b3a17..b53cd8f9620b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -218,7 +218,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms); + wptr_shift, 0, mms, 0); } static void update_mqd(struct mqd_manager *mm, void *mqd, @@ -501,13 +501,15 @@ static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd, { int xcc, err; void *xcc_mqd; + uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { xcc_mqd = mqd + hiq_mqd_size * xcc; err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd, pipe_id, queue_id, - p->doorbell_off); + p->doorbell_off, + start_inst+xcc); if (err) { pr_debug("Failed to load HIQ MQD for XCC: %d\n", xcc); break; @@ -523,13 +525,14 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, { int xcc = 0, err; void *xcc_mqd; + uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { xcc_mqd = mqd + hiq_mqd_size * xcc; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id); + queue_id, start_inst+xcc); if (err) { pr_debug("Destroy MQD failed for xcc: %d\n", xcc); break; @@ -641,6 +644,7 @@ static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, void *xcc_mqd; struct v9_mqd *m; uint64_t mqd_offset; + uint32_t start_inst = mm->dev->start_xcc_id; m = get_mqd(mqd); mqd_offset = m->cp_mqd_stride_size; @@ -649,7 +653,7 @@ static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, xcc_mqd = mqd + mqd_offset * xcc; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id); + queue_id, start_inst+xcc); if (err) { pr_debug("Destroy MQD failed for xcc: %d\n", xcc); break; @@ -667,6 +671,7 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); int xcc = 0, err; void *xcc_mqd; + uint32_t start_inst = mm->dev->start_xcc_id; uint64_t mqd_stride_size = mm->mqd_stride(mm, p); for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { @@ -674,7 +679,7 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, err = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, xcc_mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms); + wptr_shift, 0, mms, start_inst+xcc); if (err) { pr_debug("Load MQD failed for xcc: %d\n", xcc); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 8736a3cdbe1e..ebf963f42b51 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -165,7 +165,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, wptr_mask, mms); + wptr_shift, wptr_mask, mms, 0); } static void __update_mqd(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 66e021889c64..888590dfa646 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -290,7 +290,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) wave_cnt = 0; max_waves_per_cu = 0; dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt, - &max_waves_per_cu); + &max_waves_per_cu, 0); /* Translate wave count to number of compute units */ cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 5cb3e8634739..8cb3dbcae3e4 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -230,28 +230,30 @@ struct kfd2kgd_calls { /* Register access functions */ void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, - uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); + uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases, + uint32_t inst); int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid); + unsigned int vmid, uint32_t inst); - int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id); + int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst); int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); + struct mm_struct *mm, uint32_t inst); int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off); + uint32_t doorbell_off, uint32_t inst); int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm); int (*hqd_dump)(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst); int (*hqd_sdma_dump)(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, @@ -259,12 +261,12 @@ struct kfd2kgd_calls { bool (*hqd_is_occupied)(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); int (*hqd_destroy)(struct amdgpu_device *adev, void 
*mqd, enum kfd_preempt_type reset_type, unsigned int timeout, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); @@ -273,7 +275,7 @@ struct kfd2kgd_calls { int (*wave_control_execute)(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd); + uint32_t sq_cmd, uint32_t inst); bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); @@ -290,9 +292,10 @@ struct kfd2kgd_calls { uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev); void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, - int *wave_cnt, int *max_waves_per_cu); + int *wave_cnt, int *max_waves_per_cu, uint32_t inst); void (*program_trap_handler_settings)(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, + uint32_t inst); }; #endif /* KGD_KFD_INTERFACE_H_INCLUDED */ From f87f686482c6d2d4465245356854710b01f312c1 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:22:20 -0400 Subject: [PATCH 276/861] drm/amdgpu: Add XCC inst to PASID TLB flushing Add XCC instance to select the correct KIQ ring when flushing TLBs on a multi-XCC setup. Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 7 ++++--- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12 ++++++------ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7 +++++-- 9 files changed, 25 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 5afbcc390d89..9d19c7ceda3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -743,7 +743,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, } int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, enum TLB_FLUSH_TYPE flush_type) + uint16_t pasid, + enum TLB_FLUSH_TYPE flush_type, + uint32_t inst) { bool all_hub = false; @@ -751,7 +753,7 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, adev->family == AMDGPU_FAMILY_RV) all_hub = true; - return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); + return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst); } bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 01ba3589b60a..df07e212c21e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -160,7 +160,8 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev); int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, uint16_t vmid); int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, enum TLB_FLUSH_TYPE flush_type); + uint16_t pasid, enum TLB_FLUSH_TYPE flush_type, + uint32_t inst); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h 
index 6d105d7fb98b..572ef5be539f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -119,7 +119,8 @@ struct amdgpu_gmc_funcs { uint32_t vmhub, uint32_t flush_type); /* flush the vm tlb via pasid */ int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid, - uint32_t flush_type, bool all_hub); + uint32_t flush_type, bool all_hub, + uint32_t inst); /* flush the vm tlb via ring */ uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); @@ -296,9 +297,9 @@ struct amdgpu_gmc { }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) -#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \ +#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub, inst) \ ((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \ - ((adev), (pasid), (type), (allhub))) + ((adev), (pasid), (type), (allhub), (inst))) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index ff96f11c2adf..d76f5c8d4977 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -419,7 +419,7 @@ error_alloc: */ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, uint32_t flush_type, - bool all_hub) + bool all_hub, uint32_t inst) { int vmid, i; signed long r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 3453f1c0e066..4bf807d825c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -324,7 +324,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, */ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, uint32_t flush_type, - bool all_hub) + bool all_hub, uint32_t inst) { int vmid, i; signed long r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 81609a2b226f..6f53049619cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -424,7 +424,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) */ static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, uint32_t flush_type, - bool all_hub) + bool all_hub, uint32_t inst) { int vmid; unsigned int tmp; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index d48e33738a88..48475077ca92 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -622,7 +622,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) */ static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, uint32_t flush_type, - bool all_hub) + bool all_hub, uint32_t inst) { int vmid; unsigned int tmp; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6da85365e5aa..0163a761ccf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -924,7 +924,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, */ static 
int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, uint32_t flush_type, - bool all_hub) + bool all_hub, uint32_t inst) { int vmid, i; signed long r; @@ -932,8 +932,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t queried_pasid; bool ret; u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; if (amdgpu_in_reset(adev)) return -EIO; @@ -953,7 +953,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (vega20_xgmi_wa) ndw += kiq->pmf->invalidate_tlbs_size; - spin_lock(&adev->gfx.kiq[0].ring_lock); + spin_lock(&adev->gfx.kiq[inst].ring_lock); /* 2 dwords flush + 8 dwords fence */ amdgpu_ring_alloc(ring, ndw); if (vega20_xgmi_wa) @@ -964,13 +964,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) { amdgpu_ring_undo(ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); + spin_unlock(&adev->gfx.kiq[inst].ring_lock); up_read(&adev->reset_domain->sem); return -ETIME; } amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); + spin_unlock(&adev->gfx.kiq[inst].ring_lock); r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 888590dfa646..9b1e84d33cdc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -2052,6 +2052,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); struct kfd_node *dev = pdd->dev; + int xcc = 0; /* * It can be that we race and lose here, but that is extremely unlikely @@ -2069,8 +2070,10 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, pdd->qpd.vmid); } else { - amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev, - pdd->process->pasid, type); + for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) + amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev, + pdd->process->pasid, type, + dev->start_xcc_id + xcc); } } From 7fe51e6fd2368b358441a1f6e0c94f4cd7e0720f Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:25:25 -0400 Subject: [PATCH 277/861] drm/amdkfd: Update context save handling for multi XCC setup (v2) Context save handling needs to be updated for a multi XCC setup:
- On a multi XCC setup, KFD needs to report the context save base address and size for each XCC in the MQD.
- Thunk will allocate one large context save area covering all XCCs, equal to: num_of_xccs in a partition * size of the context save area for 1 XCC. However, it reports only the size of the context save area for 1 XCC in the ioctl call.
- The driver then sets up the MQD correctly using the size passed from Thunk and the number of XCCs in the partition; see the sketch after this list.
- Update the get_wave_state function to return the context save area for all XCCs in the partition.
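For illustration, the per-XCC carve-up described above is plain stride arithmetic. The following is only a sketch with a hypothetical helper name; the actual assignments appear in init_mqd_v9_4_3() below, using q->ctx_save_restore_area_address and q->ctx_save_restore_area_size:

/* Thunk allocates num_of_xccs * per_xcc_size bytes but reports only
 * per_xcc_size in the ioctl; each XCC's MQD points at its own slice.
 */
static uint64_t cwsr_base_for_xcc(uint64_t area_base,
				  uint32_t per_xcc_size, int xcc)
{
	return area_base + (uint64_t)xcc * per_xcc_size;
}

/* For each XCC's MQD m:
 *   addr = cwsr_base_for_xcc(q->ctx_save_restore_area_address,
 *                            q->ctx_save_restore_area_size, xcc);
 *   m->cp_hqd_ctx_save_base_addr_lo = lower_32_bits(addr);
 *   m->cp_hqd_ctx_save_base_addr_hi = upper_32_bits(addr);
 */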
v2: update the get_wave_state function for mqd manager v11 (Morris) Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 1 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 1 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 1 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 62 ++++++++++++++++++- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 1 + 6 files changed, 67 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 33a9394f9e58..01ea038cab6d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2095,8 +2095,8 @@ static int get_wave_state(struct device_queue_manager *dqm, * and the queue should be protected against destruction by the process * lock. */ - return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack, - ctl_stack_used_size, save_area_used_size); + return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties, + ctl_stack, ctl_stack_used_size, save_area_used_size); } static void get_queue_checkpoint_info(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index eb18be74f559..23158db7da03 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -97,6 +97,7 @@ struct mqd_manager { uint32_t queue_id); int (*get_wave_state)(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index d54c6fdebbb6..772c09b5821b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -227,6 +227,7 @@ static uint32_t read_doorbell_id(void *mqd) } static int get_wave_state(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 338d639c1898..632344b95d90 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -260,6 +260,7 @@ static uint32_t read_doorbell_id(void *mqd) } static int get_wave_state(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index b53cd8f9620b..5c9b3392758e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -295,6 +295,7 @@ static uint32_t read_doorbell_id(void *mqd) } static int get_wave_state(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) @@ -561,6 +562,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, int xcc = 0; struct kfd_mem_obj xcc_mqd_mem_obj; uint64_t xcc_gart_addr = 0; + uint64_t xcc_ctx_save_restore_area_address; uint64_t offset = mm->mqd_stride(mm, q); memset(&xcc_mqd_mem_obj, 
0x0, sizeof(struct kfd_mem_obj)); @@ -570,6 +572,23 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); m->cp_mqd_stride_size = offset; + + /* + * Update the CWSR address for each XCC if CWSR is enabled + * and the CWSR area is allocated in Thunk + */ + if (mm->dev->kfd->cwsr_enabled && + q->ctx_save_restore_area_address) { + xcc_ctx_save_restore_area_address = + q->ctx_save_restore_area_address + + (xcc * q->ctx_save_restore_area_size); + + m->cp_hqd_ctx_save_base_addr_lo = + lower_32_bits(xcc_ctx_save_restore_area_address); + m->cp_hqd_ctx_save_base_addr_hi = + upper_32_bits(xcc_ctx_save_restore_area_address); + } + if (q->format == KFD_QUEUE_FORMAT_AQL) { m->compute_tg_chunk_size = 1; @@ -689,6 +708,46 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, return err; } +static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, + void __user *ctl_stack, + u32 *ctl_stack_used_size, + u32 *save_area_used_size) +{ + int xcc, err = 0; + void *xcc_mqd; + void __user *xcc_ctl_stack; + uint64_t mqd_stride_size = mm->mqd_stride(mm, q); + u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0; + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + mqd_stride_size * xcc; + xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack + + q->ctx_save_restore_area_size * xcc); + + err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack, + &tmp_ctl_stack_used_size, + &tmp_save_area_used_size); + if (err) + break; + + /* + * Set ctl_stack_used_size and save_area_used_size to the + * values of XCC 0 when passing the info to user-space. + * For multi XCC, user-space would have to look at the header + * info of each control stack area to determine the control + * stack size and save area used. 
+ */ + if (xcc == 0) { + *ctl_stack_used_size = tmp_ctl_stack_used_size; + *save_area_used_size = tmp_save_area_used_size; + } + } + + return err; +} + #if defined(CONFIG_DEBUG_FS) static int debugfs_show_mqd(struct seq_file *m, void *data) @@ -726,7 +785,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->allocate_mqd = allocate_mqd; mqd->free_mqd = kfd_free_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; - mqd->get_wave_state = get_wave_state; mqd->get_checkpoint_info = get_checkpoint_info; mqd->checkpoint_mqd = checkpoint_mqd; mqd->restore_mqd = restore_mqd; @@ -740,11 +798,13 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->load_mqd = load_mqd_v9_4_3; mqd->update_mqd = update_mqd_v9_4_3; mqd->destroy_mqd = destroy_mqd_v9_4_3; + mqd->get_wave_state = get_wave_state_v9_4_3; } else { mqd->init_mqd = init_mqd; mqd->load_mqd = load_mqd; mqd->update_mqd = update_mqd; mqd->destroy_mqd = kfd_destroy_mqd_cp; + mqd->get_wave_state = get_wave_state; } break; case KFD_MQD_TYPE_HIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index ebf963f42b51..fe69492b1bb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -261,6 +261,7 @@ static void update_mqd_tonga(struct mqd_manager *mm, void *mqd, } static int get_wave_state(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) From 21e1217b4c0e0234704d50ea303c7603266604ac Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:30:57 -0400 Subject: [PATCH 278/861] drm/amdgpu: Fix VM fault reporting on XCC1 Fix VM fault reporting and clear VM fault register for XCC1. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0163a761ccf0..681bc9d354fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -557,6 +557,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, u64 addr; uint32_t cam_index = 0; int ret; + uint32_t node_id; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; @@ -611,7 +612,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; } else { hub_name = "gfxhub0"; - hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + node_id = (adev->ip_versions[GC_HWIP][0] == + IP_VERSION(9, 4, 3)) ? entry->node_id : 0; + hub = &adev->vmhub[node_id/2]; } memset(&task_info, 0, sizeof(struct amdgpu_task_info)); @@ -645,11 +648,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW); WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); - dev_err(adev->dev, "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - if (hub == &adev->vmhub[AMDGPU_GFXHUB(0)]) { + if (entry->vmid_src == AMDGPU_GFXHUB(0)) { dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", cid >= ARRAY_SIZE(gfxhub_client_ids) ? 
"unknown" : gfxhub_client_ids[cid], From 5e4060123687c4f2c9fb855874f77b14f07526d6 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:34:38 -0400 Subject: [PATCH 279/861] drm/amdkfd: Call DQM stop during DQM uninitialize During DQM tear down, call DQM stop to unitialize HIQ and associated memory allocated during packet manager init. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 01ea038cab6d..f78c1e7aad57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1348,9 +1348,16 @@ static int start_nocpsch(struct device_queue_manager *dqm) static int stop_nocpsch(struct device_queue_manager *dqm) { + dqm_lock(dqm); + if (!dqm->sched_running) { + dqm_unlock(dqm); + return 0; + } + if (dqm->dev->adev->asic_type == CHIP_HAWAII) pm_uninit(&dqm->packet_mgr, false); dqm->sched_running = false; + dqm_unlock(dqm); return 0; } @@ -2423,6 +2430,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, void device_queue_manager_uninit(struct device_queue_manager *dqm) { + dqm->ops.stop(dqm); dqm->ops.uninitialize(dqm); if (!dqm->dev->kfd->shared_resources.enable_mes) deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd); From f38f147ab3121adbd7510a82e6eb0b41a356c26e Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:35:55 -0400 Subject: [PATCH 280/861] drm/amdkfd: Update sysfs node properties for multi XCC Update simd_count and array_count node properties to report values multiplied by number of XCCs in the partition. Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 94af37df3ed2..6d958bf0fe90 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -468,7 +468,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count", dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, offs, "simd_count", - dev->gpu ? dev->node_props.simd_count : 0); + dev->gpu ? (dev->node_props.simd_count * + dev->gpu->num_xcc_per_node) : 0); sysfs_show_32bit_prop(buffer, offs, "mem_banks_count", dev->node_props.mem_banks_count); sysfs_show_32bit_prop(buffer, offs, "caches_count", @@ -492,7 +493,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "wave_front_size", dev->node_props.wave_front_size); sysfs_show_32bit_prop(buffer, offs, "array_count", - dev->node_props.array_count); + dev->gpu ? (dev->node_props.array_count * + dev->gpu->num_xcc_per_node) : 0); sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine", dev->node_props.simd_arrays_per_engine); sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array", From a805889a15315f7fa78c1c4bb2f1875c7c43f919 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:52:39 -0400 Subject: [PATCH 281/861] drm/amdkfd: Update SDMA queue management for GFX9.4.3 This patch updates SDMA queue management for multi XCC in GFX9.4.3. 
- Allocate/deallocate SDMA queues from the correct SDMA engines based on the partition mode.
- Update the kgd2kfd interface to fetch the correct SDMA register addresses.
- Fix dumping of the correct SDMA queue info in debugfs.
v2: squash in fix "drm/amdkfd: Fix XGMI SDMA user-mode queue allocation" Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 194 +++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 +- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 59 +++--- .../drm/amd/amdkfd/kfd_device_queue_manager.h | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 + 5 files changed, 227 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 49d8087e469e..e81bdca53f42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -31,6 +31,192 @@ #include "oss/osssys_4_0_sh_mask.h" #include "v9_structs.h" #include "soc15.h" +#include "sdma/sdma_4_4_2_offset.h" +#include "sdma/sdma_4_4_2_sh_mask.h" + +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v9_sdma_mqd *)mqd; +} + +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, + unsigned int engine_id, + unsigned int queue_id) +{ + uint32_t sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, engine_id, + regSDMA_RLC0_RB_CNTL) - + regSDMA_RLC0_RB_CNTL; + uint32_t retval = sdma_engine_reg_base + + queue_id * (regSDMA_RLC1_RB_CNTL - regSDMA_RLC0_RB_CNTL); + + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); + return retval; +} + +int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + unsigned long end_jiffies; + uint32_t data; + uint64_t data64; + uint64_t __user *wptr64 = (uint64_t __user *)wptr; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, + m->sdmax_rlcx_rb_cntl & (~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK)); + + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS); + if (data & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL_OFFSET, + m->sdmax_rlcx_doorbell_offset); + + data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA_RLC0_DOORBELL, + ENABLE, 1); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 1); + if (read_user_wptr(mm, wptr64, data64)) { + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR, + lower_32_bits(data64)); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI, + upper_32_bits(data64)); + } else { + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + } + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 0); + 
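/* [Editorial note, not part of the original patch: MINOR_PTR_UPDATE was
 * held at 1 around the WPTR writes above so the engine does not act on a
 * half-programmed write pointer; with it cleared again, the remaining
 * ring-buffer registers are restored below.]
 */
+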
WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE_HI, + m->sdmax_rlcx_rb_base_hi); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_LO, + m->sdmax_rlcx_rb_rptr_addr_lo); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_HI, + m->sdmax_rlcx_rb_rptr_addr_hi); + + data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA_RLC0_RB_CNTL, + RB_ENABLE, 1); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, data); + + return 0; +} + +int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); + uint32_t i = 0, reg; +#undef HQD_N_REGS +#define HQD_N_REGS (19+6+7+12) +#define DUMP_REG(addr) do { \ + if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ + break; \ + (*dump)[i][0] = (addr) << 2; \ + (*dump)[i++][1] = RREG32(addr); \ + } while (0) + + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + for (reg = regSDMA_RLC0_RB_CNTL; reg <= regSDMA_RLC0_DOORBELL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_STATUS; reg <= regSDMA_RLC0_CSA_ADDR_HI; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_IB_SUB_REMAIN; + reg <= regSDMA_RLC0_MINOR_PTR_UPDATE; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_MIDCMD_DATA0; + reg <= regSDMA_RLC0_MIDCMD_CNTL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t sdma_rlc_rb_cntl; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL); + + if (sdma_rlc_rb_cntl & SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK) + return true; + + return false; +} + +int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, + unsigned int utimeout) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t temp; + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL); + temp = temp & ~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK; + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, temp); + + while (true) { + temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS); + if (temp & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL) | + SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK); + + m->sdmax_rlcx_rb_rptr = + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr_hi = + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI); + + return 0; +} static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid, uint32_t inst) @@ -166,13 +352,13 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { .init_interrupts = 
kgd_gfx_v9_init_interrupts, .hqd_load = kgd_gfx_v9_4_3_hqd_load, .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, - .hqd_sdma_load = kgd_arcturus_hqd_sdma_load, + .hqd_sdma_load = kgd_gfx_v9_4_3_hqd_sdma_load, .hqd_dump = kgd_gfx_v9_hqd_dump, - .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump, + .hqd_sdma_dump = kgd_gfx_v9_4_3_hqd_sdma_dump, .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, + .hqd_sdma_is_occupied = kgd_gfx_v9_4_3_hqd_sdma_is_occupied, .hqd_destroy = kgd_gfx_v9_hqd_destroy, - .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, + .hqd_sdma_destroy = kgd_gfx_v9_4_3_hqd_sdma_destroy, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 37c6dc5c37bf..ec5f85ff34e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -741,6 +741,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (!node) goto node_alloc_error; + node->node_id = i; node->adev = kfd->adev; node->kfd = kfd; node->kfd2kgd = kfd->kfd2kgd; @@ -1323,15 +1324,16 @@ unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) { /* If XGMI is not supported, all SDMA engines are PCIe */ if (!node->adev->gmc.xgmi.supported) - return node->adev->sdma.num_instances; + return node->adev->sdma.num_instances/(int)node->kfd->num_nodes; - return min(node->adev->sdma.num_instances, 2); + return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2); } unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) { /* After reserved for PCIe, the rest of engines are XGMI */ - return node->adev->sdma.num_instances - kfd_get_num_sdma_engines(node); + return node->adev->sdma.num_instances/(int)node->kfd->num_nodes - + kfd_get_num_sdma_engines(node); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f78c1e7aad57..69419a53a14e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -124,6 +124,15 @@ static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manag return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; } +static void init_sdma_bitmaps(struct device_queue_manager *dqm) +{ + bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES); + bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm)); + + bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); + bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm)); +} + void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -1268,24 +1277,6 @@ static void init_interrupts(struct device_queue_manager *dqm) } } -static void init_sdma_bitmaps(struct device_queue_manager *dqm) -{ - unsigned int num_sdma_queues = - min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8, - get_num_sdma_queues(dqm)); - unsigned int num_xgmi_sdma_queues = - min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8, - get_num_xgmi_sdma_queues(dqm)); - - if (num_sdma_queues) - dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0); - if (num_xgmi_sdma_queues) - dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0); - - dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm); - pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap); -} - static int initialize_nocpsch(struct 
device_queue_manager *dqm) { int pipe, queue; @@ -1375,46 +1366,49 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, int bit; if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - if (dqm->sdma_bitmap == 0) { + if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { pr_err("No more SDMA queue to allocate\n"); return -ENOMEM; } if (restore_sdma_id) { /* Re-use existing sdma_id */ - if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) { + if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) { pr_err("SDMA queue already in use\n"); return -EBUSY; } - dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id); + clear_bit(*restore_sdma_id, dqm->sdma_bitmap); q->sdma_id = *restore_sdma_id; } else { /* Find first available sdma_id */ - bit = __ffs64(dqm->sdma_bitmap); - dqm->sdma_bitmap &= ~(1ULL << bit); + bit = find_first_bit(dqm->sdma_bitmap, + get_num_sdma_queues(dqm)); + clear_bit(bit, dqm->sdma_bitmap); q->sdma_id = bit; } - q->properties.sdma_engine_id = q->sdma_id % - kfd_get_num_sdma_engines(dqm->dev); + q->properties.sdma_engine_id = + dqm->dev->node_id * get_num_all_sdma_engines(dqm) + + q->sdma_id % kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / kfd_get_num_sdma_engines(dqm->dev); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - if (dqm->xgmi_sdma_bitmap == 0) { + if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { pr_err("No more XGMI SDMA queue to allocate\n"); return -ENOMEM; } if (restore_sdma_id) { /* Re-use existing sdma_id */ - if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) { + if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) { pr_err("SDMA queue already in use\n"); return -EBUSY; } - dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id); + clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap); q->sdma_id = *restore_sdma_id; } else { - bit = __ffs64(dqm->xgmi_sdma_bitmap); - dqm->xgmi_sdma_bitmap &= ~(1ULL << bit); + bit = find_first_bit(dqm->xgmi_sdma_bitmap, + get_num_xgmi_sdma_queues(dqm)); + clear_bit(bit, dqm->xgmi_sdma_bitmap); q->sdma_id = bit; } /* sdma_engine_id is sdma id including @@ -1424,6 +1418,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * PCIe-optimized ones */ q->properties.sdma_engine_id = + dqm->dev->node_id * get_num_all_sdma_engines(dqm) + kfd_get_num_sdma_engines(dqm->dev) + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / @@ -1442,11 +1437,11 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm, if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { if (q->sdma_id >= get_num_sdma_queues(dqm)) return; - dqm->sdma_bitmap |= (1ULL << q->sdma_id); + set_bit(q->sdma_id, dqm->sdma_bitmap); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm)) return; - dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id); + set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index e554a48f3054..b11c474d4067 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -239,8 +239,8 @@ struct device_queue_manager { unsigned int total_queue_count; unsigned int next_pipe_to_allocate; unsigned int *allocated_queues; - uint64_t sdma_bitmap; - uint64_t xgmi_sdma_bitmap; + DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES); + DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); /* the pasid 
mapping for each kfd vmid */ uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1337fcdf8958..5cfebcc8b305 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -113,6 +113,8 @@ #define KFD_UNMAP_LATENCY_MS (4000) +#define KFD_MAX_SDMA_QUEUES 128 + /* * 512 = 0x200 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the @@ -260,6 +262,7 @@ struct kfd_vmid_info { struct kfd_dev; struct kfd_node { + unsigned int node_id; struct amdgpu_device *adev; /* Duplicated here along with keeping * a copy in kfd_dev to save a hop */ From a8027fcd08f9127d38edeb59600ecb76c56a121a Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 2 Mar 2022 21:40:38 -0500 Subject: [PATCH 282/861] drm/amdgpu: Fix CP_HYP_XCP_CTL register programming in CPX mode Currently, in CPX mode, the CP_HYP_XCP_CTL register is programmed incorrectly with the number of XCCs in the partition. As a result, HIQ doesn't work in CPX mode. Fix this by programming the correct number of XCCs in a partition, which is 1, in CPX mode. Signed-off-by: Mukul Joshi Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 232feb387a40..1dcb69b4816f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1161,7 +1161,7 @@ static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) break; case 2: tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID); - tmp = tmp | (adev->gfx.num_xcd << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP)); + tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP)); WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, tmp); tmp = xcc_id << REG_FIELD_SHIFT(CP_PSP_XCP_CTL, PHYSICAL_XCC_ID); From ef75a6ef37235e211bbdb17c25e5f79c55df1750 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Thu, 3 Mar 2022 10:56:05 -0500 Subject: [PATCH 283/861] drm/amdkfd: Update coherence settings for svm ranges Recently introduced commit "drm/amdgpu: Set cache coherency for GC 9.4.3" did not update the settings applicable for svm ranges. Add the coherence settings for svm ranges for GFX IP 9.4.3. Reviewed-by: Amber Lin Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 17 +++++++++++++++++ include/uapi/linux/kfd_ioctl.h | 2 ++ 2 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 96ccff79902c..4b4f3bf8b823 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1159,6 +1159,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, uint64_t pte_flags; bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN); bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; + bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; if (domain == SVM_RANGE_VRAM_DOMAIN) bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); @@ -1198,6 +1199,22 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } break; + case IP_VERSION(9, 4, 3): + //TODO: Need more work for handling multiple memory partitions + //e.g. NPS4. 
Current approach is only applicable without memory + //partitions. + snoop = true; + if (uncached) + mapping_flags |= AMDGPU_VM_MTYPE_UC; + /* local HBM region close to partition */ + else if (bo_adev == adev) + mapping_flags |= AMDGPU_VM_MTYPE_RW; + /* local HBM region far from partition or remote XGMI GPU or + * system memory + */ + else + mapping_flags |= AMDGPU_VM_MTYPE_NC; + break; default: mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 2da5c3ad71bd..2a9671e1ddb5 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -623,6 +623,8 @@ enum kfd_mmio_remap { #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 /* Keep GPU memory mapping always valid as if XNACK is disabled */ #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040 +/* Uncached access to memory */ +#define KFD_IOCTL_SVM_FLAG_UNCACHED 0x00000080 /** * kfd_ioctl_svm_op - SVM ioctl operations From 5db392a04575120de4e73ee10c0dc727426100c7 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 10 Mar 2022 16:45:53 +0530 Subject: [PATCH 284/861] drm/amdgpu: Use new atomfirmware init for GC 9.4.3 Use the new atomfirmware initialization logic for GC 9.4.3 based ASICs also. ASIC init logic doesn't consider boot clocks during init. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 82a3d0ff6560..54be85539ecb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -942,7 +942,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev) { amdgpu_asic_pre_asic_init(adev); - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) || + adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) return amdgpu_atomfirmware_asic_init(adev, true); else return amdgpu_atom_asic_init(adev->mode_info.atom_context); From d25555866172a0454b0dc1374b0ff29e1ae5f676 Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Wed, 23 Mar 2022 11:01:52 -0400 Subject: [PATCH 285/861] drm/amdgpu: Set XNACK per process on GC 9.4.3 Set RETRY_PERMISSION_OR_INVALID_PAGE_FAULT bit in VM_CONTEXT1_CNTL as well so XNACK can be enabled in the SQ per process. Signed-off-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index e5016fea1f28..d74621662311 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -328,13 +328,15 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) PAGE_TABLE_BLOCK_SIZE, block_size); /* Send no-retry XNACK on fault to suppress VM fault storm. - * On Aldebaran, XNACK can be enabled in the SQ per-process. + * On 9.4.2 and 9.4.3, XNACK can be enabled in + * the SQ per-process. * Retry faults need to be enabled for that to work.
*/ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, !adev->gmc.noretry || - adev->asic_type == CHIP_ALDEBARAN); + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)); WREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i * hub->ctx_distance, tmp); WREG32_SOC15_OFFSET(GC, j, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 9ec06f9db761..3883758b7993 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -288,7 +288,7 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, block_size); - /* On Aldebaran, XNACK can be enabled in the SQ per-process. + /* On 9.4.3, XNACK can be enabled in the SQ per-process. * Retry faults need to be enabled for that to work. */ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, From 0ee20b86966cff359f51051a56f8c2d89b09aed4 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 28 Feb 2022 19:30:10 +0800 Subject: [PATCH 286/861] drm/amdgpu: assign the doorbell index in 1st page to sdma page queue Previously for vega10, the sdma_doorbell_range was only enough for the sdma gfx queue, so an index on the second doorbell page was allocated for the sdma page queue. From vega20, the sdma_doorbell_range on the 1st page is enlarged. Therefore, just use those first-page indexes instead of allocating on the 2nd page. v2: change "(x << 1) + 2" to "(x + 1) << 1" for readability and add comments. Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 14 ++++++++++++-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 7 +++---- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 54be85539ecb..38b6bbcaf186 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1091,7 +1091,8 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) * doorbells are in the first page. So with paging queue enabled, * the max num_kernel_doorbells should + 1 page (0x400 in dword) */ - if (adev->asic_type >= CHIP_VEGA10) + if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) && + adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) adev->doorbell.num_kernel_doorbells += 0x400; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 50b6eb9bcfda..70b0d1fd9868 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1847,8 +1847,18 @@ static int sdma_v4_0_sw_init(void *handle) /* paging queue use same doorbell index/routing as gfx queue * with 0x400 (4096 dwords) offset on second doorbell page */ - ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; - ring->doorbell_index += 0x400; + if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) && + adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) { + ring->doorbell_index = + adev->doorbell_index.sdma_engine[i] << 1; + ring->doorbell_index += 0x400; + } else { + /* From vega20, the sdma_doorbell_range in 1st + * doorbell page is reserved for page queue.
+ */ + ring->doorbell_index = + (adev->doorbell_index.sdma_engine[i] + 1) << 1; + } if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) ring->vm_hub = AMDGPU_MMHUB1(0); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 7efe7c43fffb..441d6911fd20 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1323,11 +1323,10 @@ static int sdma_v4_4_2_sw_init(void *handle) ring->ring_obj = NULL; ring->use_doorbell = true; - /* paging queue use same doorbell index/routing as gfx queue - * with 0x400 (4096 dwords) offset on second doorbell page + /* doorbell index of page queue is assigned right after + * gfx queue on the same instance */ - ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; - ring->doorbell_index += 0x400; + ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] + 1) << 1; ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "page%d", i); From 386ea27c3b0bcdd5b5be74bdf26022ab931eae7b Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 23 Feb 2022 11:43:01 +0800 Subject: [PATCH 287/861] drm/amdgpu: adjust some basic elements for multiple AID case add some elements below: - num_aid - aid_id for each sdma instance - num_inst_per_aid for sdma and extend macro size below: - SDMA_MAX_INSTANCES to 16 - AMDGPU_MAX_RINGS to 96 - AMDGPU_MAX_HWIP_RINGS to 32 v2: move aid_id from amdgpu_ring to amdgpu_sdma_instance. (Lijo) Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 4 +++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 00c52caeebeb..5f4396185a2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1051,6 +1051,7 @@ struct amdgpu_device { bool job_hang; bool dc_enabled; + uint32_t num_aid; }; static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index b0dc0a0c2631..8f4b416a92e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -37,8 +37,8 @@ struct amdgpu_job; struct amdgpu_vm; /* max number of rings */ -#define AMDGPU_MAX_RINGS 28 -#define AMDGPU_MAX_HWIP_RINGS 8 +#define AMDGPU_MAX_RINGS 96 +#define AMDGPU_MAX_HWIP_RINGS 32 #define AMDGPU_MAX_GFX_RINGS 2 #define AMDGPU_MAX_SW_GFX_RINGS 2 #define AMDGPU_MAX_COMPUTE_RINGS 8 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index fc8528812598..67975dcede5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -26,7 +26,7 @@ #include "amdgpu_ras.h" /* max number of IP instances */ -#define AMDGPU_MAX_SDMA_INSTANCES 8 +#define AMDGPU_MAX_SDMA_INSTANCES 16 enum amdgpu_sdma_irq { AMDGPU_SDMA_IRQ_INSTANCE0 = 0, @@ -49,6 +49,7 @@ struct amdgpu_sdma_instance { struct amdgpu_ring ring; struct amdgpu_ring page; bool burst_nop; + uint32_t aid_id; }; struct amdgpu_sdma_ras { @@ -66,6 +67,7 @@ struct amdgpu_sdma { struct amdgpu_irq_src srbm_write_irq; int num_instances; + int num_inst_per_aid; uint32_t srbm_soft_reset; bool has_page_queue; struct ras_common_if *ras_if; From f786b1d4ec778a5fc23911f06a0e38c9f4953b0c Mon Sep 17 00:00:00 2001 
From: Le Ma Date: Wed, 20 Apr 2022 17:03:00 +0800 Subject: [PATCH 288/861] drm/amdgpu: add support for SDMA on multiple AIDs Initialize SDMA instances on each AID. v2: revise coding fault in hw_fini Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 8 ++++++++ drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 20 +++++++++++++------- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 67975dcede5d..632b77138fe4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -37,6 +37,14 @@ enum amdgpu_sdma_irq { AMDGPU_SDMA_IRQ_INSTANCE5, AMDGPU_SDMA_IRQ_INSTANCE6, AMDGPU_SDMA_IRQ_INSTANCE7, + AMDGPU_SDMA_IRQ_INSTANCE8, + AMDGPU_SDMA_IRQ_INSTANCE9, + AMDGPU_SDMA_IRQ_INSTANCE10, + AMDGPU_SDMA_IRQ_INSTANCE11, + AMDGPU_SDMA_IRQ_INSTANCE12, + AMDGPU_SDMA_IRQ_INSTANCE13, + AMDGPU_SDMA_IRQ_INSTANCE14, + AMDGPU_SDMA_IRQ_INSTANCE15, AMDGPU_SDMA_IRQ_LAST }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 441d6911fd20..7deadea03caa 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1253,9 +1253,10 @@ static int sdma_v4_4_2_sw_init(void *handle) struct amdgpu_ring *ring; int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 aid_id; /* SDMA trap event */ - for (i = 0; i < adev->sdma.num_instances; i++) { + for (i = 0; i < adev->sdma.num_inst_per_aid; i++) { r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), SDMA0_4_0__SRCID__SDMA_TRAP, &adev->sdma.trap_irq); @@ -1264,7 +1265,7 @@ static int sdma_v4_4_2_sw_init(void *handle) } /* SDMA SRAM ECC event */ - for (i = 0; i < adev->sdma.num_instances; i++) { + for (i = 0; i < adev->sdma.num_inst_per_aid; i++) { r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), SDMA0_4_0__SRCID__SDMA_SRAM_ECC, &adev->sdma.ecc_irq); @@ -1273,7 +1274,7 @@ static int sdma_v4_4_2_sw_init(void *handle) } /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/ - for (i = 0; i < adev->sdma.num_instances; i++) { + for (i = 0; i < adev->sdma.num_inst_per_aid; i++) { r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), SDMA0_4_0__SRCID__SDMA_VM_HOLE, &adev->sdma.vm_hole_irq); @@ -1303,15 +1304,17 @@ static int sdma_v4_4_2_sw_init(void *handle) ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = true; + aid_id = adev->sdma.instance[i].aid_id; DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, ring->use_doorbell?"true":"false"); /* doorbell size is 2 dwords, get DWORD offset */ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; - ring->vm_hub = AMDGPU_MMHUB0(0); + ring->vm_hub = AMDGPU_MMHUB0(aid_id); - sprintf(ring->name, "sdma%d", i); + sprintf(ring->name, "sdma%d.%d", aid_id, + i % adev->sdma.num_inst_per_aid); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_INSTANCE0 + i, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -1327,9 +1330,10 @@ static int sdma_v4_4_2_sw_init(void *handle) * gfx queue on the same instance */ ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] + 1) << 1; - ring->vm_hub = AMDGPU_MMHUB0(0); + ring->vm_hub = AMDGPU_MMHUB0(aid_id); - sprintf(ring->name, "page%d", i); + sprintf(ring->name, "page%d.%d", aid_id, + i % adev->sdma.num_inst_per_aid); r = amdgpu_ring_init(adev, ring, 1024, 
&adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_INSTANCE0 + i, @@ -1811,6 +1815,8 @@ static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev) &sdma_v4_4_2_page_ring_funcs; adev->sdma.instance[i].page.me = i; } + + adev->sdma.instance[i].aid_id = i / adev->sdma.num_inst_per_aid; } } From 2a47a2d90e5cd96c24503061c8920a1e6ee248a0 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 1 Mar 2022 19:42:29 +0800 Subject: [PATCH 289/861] drm/amdgpu: assign the doorbell index for sdma on non-AID0 Allocate new sdma doorbell index for the instances only on AID1 for now. Todo: there's limitation that SDMA doorbell index on SDMA 4.4.2 needs to be less than 0x1FF, so the tail part in _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT is not enough to store sdma doorbell range on maximum 4 AIDs if doorbell_range is 20. So it looks better to create a new doorbell index assignment table for 4.4.2. v2: change "(x << 1) + 2" to "(x + 1) << 1" for readability. Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 6 +++++- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 18 ++++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index ffb75d23d2fc..b036d2f01930 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -85,6 +85,7 @@ struct amdgpu_doorbell_index { uint32_t last_non_cp; uint32_t xcc1_kiq_start; uint32_t xcc1_mec_ring0_start; + uint32_t aid1_sdma_start; uint32_t max_assignment; /* Per engine SDMA doorbell size in dword */ uint32_t sdma_doorbell_range; @@ -171,7 +172,10 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT /* 8 compute rings per GC. 
Max to 0x1CE */ AMDGPU_VEGA20_DOORBELL_XCC1_MEC_RING0_START = 0x197, - AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x1CE, + /* AID1 SDMA: 0x1D0 ~ 0x1F7 */ + AMDGPU_VEGA20_DOORBELL_AID1_sDMA_START = 0x1D0, + + AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x1F7, AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF } AMDGPU_VEGA20_DOORBELL_ASSIGNMENT; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 7deadea03caa..6935a24d1e89 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1310,7 +1310,14 @@ static int sdma_v4_4_2_sw_init(void *handle) ring->use_doorbell?"true":"false"); /* doorbell size is 2 dwords, get DWORD offset */ - ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; + if (aid_id > 0) + ring->doorbell_index = + (adev->doorbell_index.aid1_sdma_start << 1) + + adev->doorbell_index.sdma_doorbell_range + * (i - adev->sdma.num_inst_per_aid); + else + ring->doorbell_index = + adev->doorbell_index.sdma_engine[i] << 1; ring->vm_hub = AMDGPU_MMHUB0(aid_id); sprintf(ring->name, "sdma%d.%d", aid_id, @@ -1329,7 +1336,14 @@ static int sdma_v4_4_2_sw_init(void *handle) /* doorbell index of page queue is assigned right after * gfx queue on the same instance */ - ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] + 1) << 1; + if (aid_id > 0) + ring->doorbell_index = + ((adev->doorbell_index.aid1_sdma_start + 1) << 1) + + adev->doorbell_index.sdma_doorbell_range + * (i - adev->sdma.num_inst_per_aid); + else + ring->doorbell_index = + (adev->doorbell_index.sdma_engine[i] + 1) << 1; ring->vm_hub = AMDGPU_MMHUB0(aid_id); sprintf(ring->name, "page%d.%d", aid_id, From 3a1083873ba7730970665d04c33680f96b27e3b4 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 2 Mar 2022 17:33:24 +0800 Subject: [PATCH 290/861] drm/amdgpu: do mmhub init for multiple AIDs Mmhub on each AID needs to be initialized respectively Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 577 ++++++++++++++---------- 1 file changed, 332 insertions(+), 245 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 3883758b7993..67338cb3d7bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -53,18 +53,27 @@ static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + struct amdgpu_vmhub *hub; + int i; - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); + for (i = 0; i < adev->num_aid; i++) { + hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; + WREG32_SOC15_OFFSET(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); + WREG32_SOC15_OFFSET(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); + } } static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) { uint64_t pt_base; + int i; if (adev->gmc.pdb0_bo) pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); @@ -76,27 +85,37 @@ static void 
mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) /* If use GART for FB translation, vmid0 page table covers both * vram and system memory (gart) */ - if (adev->gmc.pdb0_bo) { - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.fb_start >> 12)); - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.fb_start >> 44)); + for (i = 0; i < adev->num_aid; i++) { + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.fb_start >> 12)); + WREG32_SOC15(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.fb_start >> 44)); - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->gmc.gart_end >> 44)); + WREG32_SOC15(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); - } else { - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.gart_start >> 12)); - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.gart_start >> 44)); + } else { + WREG32_SOC15(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->gmc.gart_end >> 44)); + WREG32_SOC15(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(MMHUB, i, + regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } } } @@ -104,159 +123,202 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; uint32_t tmp; + int i; - /* Program the AGP BAR */ - WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BASE, 0); - WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); - WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + for (i = 0; i < adev->num_aid; i++) { + /* Program the AGP BAR */ + WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0); + WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, + adev->gmc.agp_start >> 24); + WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, + adev->gmc.agp_end >> 24); - /* Program the system aperture low logical page number. */ - WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, - min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + if (amdgpu_sriov_vf(adev)) + return; - WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - /* In the case squeezing vram into GART aperture, we don't use - * FB aperture and AGP aperture. Disable them. 
- */ - if (adev->gmc.pdb0_bo) { - WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF); - WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0); - WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0); - WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); - WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); - WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* In the case squeezing vram into GART aperture, we don't use + * FB aperture and AGP aperture. Disable them. + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF); + WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0); + WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0); + WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE, + 0x00FFFFFF); + WREG32_SOC15(MMHUB, i, + regMC_VM_SYSTEM_APERTURE_LOW_ADDR, + 0x3FFFFFFF); + WREG32_SOC15(MMHUB, i, + regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + } + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); + WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". */ + WREG32_SOC15(MMHUB, i, + regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(MMHUB, i, + regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); } - if (amdgpu_sriov_vf(adev)) - return; - - /* Set default page address. */ - value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); - WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, - (u32)(value >> 12)); - WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, - (u32)(value >> 44)); - - /* Program "protection fault". */ - WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, - (u32)(adev->dummy_page_addr >> 12)); - WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, - (u32)((u64)adev->dummy_page_addr >> 44)); - - tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, - ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); - WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); } static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) { uint32_t tmp; + int i; /* Setup TLB control */ - tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL); + for (i = 0; i < adev->num_aid; i++) { + tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, + 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC);/* XXX for emulation. */ + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); - WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); + WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp); + } } static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; + int i; if (amdgpu_sriov_vf(adev)) return; /* Setup L2 cache */ - tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); - /* XXX for emulation, Refer to closed source code.*/ - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, - 0); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); - WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp); + for (i = 0; i < adev->num_aid; i++) { + tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, + ENABLE_L2_FRAGMENT_PROCESSING, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, + L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, + 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, + CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, + IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp); - tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); - WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2, tmp); + tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, + 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp); - tmp = regVM_L2_CNTL3_DEFAULT; - if (adev->gmc.translate_further) { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, - L2_CACHE_BIGK_FRAGMENT_SIZE, 9); - } else { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, - L2_CACHE_BIGK_FRAGMENT_SIZE, 6); - } - WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, tmp); + tmp = regVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp); - tmp = regVM_L2_CNTL4_DEFAULT; - if (adev->gmc.xgmi.connected_to_cpu) { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, - VMC_TAP_PDE_REQUEST_PHYSICAL, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, - VMC_TAP_PTE_REQUEST_PHYSICAL, 1); - } else { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, - 
VMC_TAP_PDE_REQUEST_PHYSICAL, 0); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, - VMC_TAP_PTE_REQUEST_PHYSICAL, 0); - } - WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL4, tmp); + tmp = regVM_L2_CNTL4_DEFAULT; + if (adev->gmc.xgmi.connected_to_cpu) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PDE_REQUEST_PHYSICAL, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PTE_REQUEST_PHYSICAL, 1); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + } + WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp); + } } static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev) { uint32_t tmp; + int i; - tmp = RREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, - adev->gmc.vmid0_page_table_depth); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, - adev->gmc.vmid0_page_table_block_size); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); - WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL, tmp); + for (i = 0; i < adev->num_aid; i++) { + tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, + adev->gmc.vmid0_page_table_depth); + tmp = REG_SET_FIELD(tmp, + VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, + adev->gmc.vmid0_page_table_block_size); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp); + } } static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) { + int i; + if (amdgpu_sriov_vf(adev)) return; - WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0xFFFFFFFF); - WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, 0x0000000F); + for (i = 0; i < adev->num_aid; i++) { + WREG32_SOC15(MMHUB, i, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + 0XFFFFFFFF); + WREG32_SOC15(MMHUB, i, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + 0x0000000F); - WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0); - WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0); + WREG32_SOC15(MMHUB, i, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, + 0); + WREG32_SOC15(MMHUB, i, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, + 0); - WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); - WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); + WREG32_SOC15(MMHUB, i, + regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); + WREG32_SOC15(MMHUB, i, + regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); + } } static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + struct amdgpu_vmhub *hub; unsigned num_level, block_size; uint32_t tmp; - int i; + int i, j; num_level = adev->vm_manager.num_level; block_size = adev->vm_manager.block_size; @@ -265,60 +327,73 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) else block_size -= 9; - for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, - num_level); - tmp = 
REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, - 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - PAGE_TABLE_BLOCK_SIZE, - block_size); - /* On 9.4.3, XNACK can be enabled in the SQ per-process. - * Retry faults need to be enabled for that to work. - */ - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - 1); - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, - i * hub->ctx_distance, tmp); - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, - i * hub->ctx_addr_distance, 0); - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, - i * hub->ctx_addr_distance, 0); - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, - i * hub->ctx_addr_distance, - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, - i * hub->ctx_addr_distance, - upper_32_bits(adev->vm_manager.max_pfn - 1)); + for (j = 0; j < adev->num_aid; j++) { + hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL, + i); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PAGE_TABLE_DEPTH, num_level); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + block_size); + /* On 9.4.3, XNACK can be enabled in the SQ + * per-process. Retry faults need to be enabled for + * that to work. 
+ */ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1); + WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(MMHUB, j, + regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, j, + regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, j, + regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(MMHUB, j, + regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } } } static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; - unsigned i; + struct amdgpu_vmhub *hub; + unsigned i, j; - for (i = 0; i < 18; ++i) { - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, - i * hub->eng_addr_distance, 0xffffffff); - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, - i * hub->eng_addr_distance, 0x1f); + for (j = 0; j < adev->num_aid; j++) { + hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; + for (i = 0; i < 18; ++i) { + WREG32_SOC15_OFFSET(MMHUB, j, + regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(MMHUB, j, + regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } } } @@ -352,28 +427,33 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + struct amdgpu_vmhub *hub; u32 tmp; - u32 i; + u32 i, j; /* Disable all tables */ - for (i = 0; i < 16; i++) - WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL, - i * hub->ctx_distance, 0); + for (j = 0; j < adev->num_aid; j++) { + hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); - /* Setup TLB control */ - tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - ENABLE_ADVANCED_DRIVER_MODEL, 0); - WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, + 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp); - if (!amdgpu_sriov_vf(adev)) { - /* Setup L2 cache */ - tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); - WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp); - WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, 0); + if (!amdgpu_sriov_vf(adev)) { + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, + 0); + WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp); + WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0); + } } } @@ -386,72 +466,79 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; + int i; if (amdgpu_sriov_vf(adev)) return; - tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - 
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, - value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); - tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); - if (!value) { + for (i = 0; i < adev->num_aid; i++) { + tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - CRASH_ON_NO_RETRY_FAULT, 1); + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, - CRASH_ON_RETRY_FAULT, 1); - } + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } - WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp); + WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp); + } } static void mmhub_v1_8_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + struct amdgpu_vmhub *hub; + int i; - hub->ctx0_ptb_addr_lo32 = - SOC15_REG_OFFSET(MMHUB, 0, - regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); - hub->ctx0_ptb_addr_hi32 = - SOC15_REG_OFFSET(MMHUB, 0, - regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); - hub->vm_inv_eng0_req = - SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ); - hub->vm_inv_eng0_ack = - SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK); - hub->vm_context0_cntl = - SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL); - hub->vm_l2_pro_fault_status = - SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_STATUS); - hub->vm_l2_pro_fault_cntl = - SOC15_REG_OFFSET(MMHUB, 0, 
regVM_L2_PROTECTION_FAULT_CNTL); + for (i = 0; i < adev->num_aid; i++) { + hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; - hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; - hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - - regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; - hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ; - hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - - regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); + hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK); + hub->vm_context0_cntl = + SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, 0, + regVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, 0, + regVM_L2_PROTECTION_FAULT_CNTL); + hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = + regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - + regVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + } } static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev, From ed42f2cc3b56955310a16da726886e684ed88432 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 24 Feb 2022 16:26:07 +0800 Subject: [PATCH 291/861] drm/amdgpu: correct the vmhub reference for each XCD in gfxhub init Correct this even though the value is the same across the different vmhubs.
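For reference, a minimal sketch of the per-XCD pattern the hunks below repeat, based on the setup_vm_pt_regs hunk (simplified, not the complete function): the hub pointer is re-derived inside the loop instead of being fixed to XCD0 before it.

	struct amdgpu_vmhub *hub;
	int i;

	for (i = 0; i < adev->gfx.num_xcd; i++) {
		/* look up the hub for this XCD; previously AMDGPU_GFXHUB(0) */
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
		WREG32_SOC15_OFFSET(GC, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));
	}

The offsets happen to be identical across the vmhubs today, but indexing by XCD keeps the code correct should they ever diverge.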
Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index d74621662311..d3424ce97aa8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -42,10 +42,11 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + struct amdgpu_vmhub *hub; int i; for (i = 0; i < adev->gfx.num_xcd; i++) { + hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; WREG32_SOC15_OFFSET(GC, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -291,7 +292,7 @@ static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + struct amdgpu_vmhub *hub; unsigned num_level, block_size; uint32_t tmp; int i, j; @@ -304,6 +305,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) block_size -= 9; for (j = 0; j < adev->gfx.num_xcd; j++) { + hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); @@ -359,10 +361,11 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + struct amdgpu_vmhub *hub; unsigned i, j; for (j = 0; j < adev->gfx.num_xcd; j++) { + hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, i * hub->eng_addr_distance, 0xffffffff); @@ -408,11 +411,12 @@ static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + struct amdgpu_vmhub *hub; u32 tmp; u32 i, j; for (j = 0; j < adev->gfx.num_xcd; j++) { + hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; /* Disable all tables */ for (i = 0; i < 16; i++) WREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT0_CNTL, From 0d81101c190d1835f1bfba85dc3d65b9f9cacd68 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 25 Feb 2022 15:14:19 +0800 Subject: [PATCH 292/861] drm/amdgpu: complement the IH node_id table for multiple AIDs With different node_id, the SDMA interrupt from multiple AIDs can be distinguished by sw driver. 
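For illustration, the instance computation can be read as the hypothetical helper below; the helper name is illustrative only, and the actual change open-codes this in sdma_v4_4_2_process_trap_irq():

	static u32 sdma_instance_from_ih_entry(struct amdgpu_device *adev,
					       struct amdgpu_iv_entry *entry)
	{
		/* logical SDMA instance within one AID, from the client id */
		u32 instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);

		/* offset by the physical AID that raised the interrupt */
		return instance + node_id_to_phys_map[entry->node_id] *
			adev->sdma.num_inst_per_aid;
	}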
Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 6 +++++- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 3 +++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 031610c1340a..3dafaf70c987 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -100,12 +100,16 @@ const char *soc15_ih_clientid_name[] = { }; const int node_id_to_phys_map[NODEID_MAX] = { + [AID0_NODEID] = 0, [XCD0_NODEID] = 0, [XCD1_NODEID] = 1, + [AID1_NODEID] = 1, [XCD2_NODEID] = 2, [XCD3_NODEID] = 3, + [AID2_NODEID] = 2, [XCD4_NODEID] = 4, [XCD5_NODEID] = 5, + [AID3_NODEID] = 3, [XCD6_NODEID] = 6, [XCD7_NODEID] = 7, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index efe8a278cbdf..04c0b4fa17a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -102,13 +102,17 @@ struct amdgpu_irq { bool retry_cam_enabled; }; -enum interrupt_node_id_per_xcp { +enum interrupt_node_id_per_aid { + AID0_NODEID = 0, XCD0_NODEID = 1, XCD1_NODEID = 2, + AID1_NODEID = 4, XCD2_NODEID = 5, XCD3_NODEID = 6, + AID2_NODEID = 8, XCD4_NODEID = 9, XCD5_NODEID = 10, + AID3_NODEID = 12, XCD6_NODEID = 13, XCD7_NODEID = 14, NODEID_MAX, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 6935a24d1e89..d3c7f9a43ef1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1492,6 +1492,9 @@ static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: SDMA trap\n"); instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + instance += node_id_to_phys_map[entry->node_id] * + adev->sdma.num_inst_per_aid; + switch (entry->ring_id) { case 0: amdgpu_fence_process(&adev->sdma.instance[instance].ring); From 5de6bd6a13f1c717279c870eb8290e466c8f6a80 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 25 Feb 2022 15:47:20 +0800 Subject: [PATCH 293/861] drm/amdgpu: set mmhub bitmask for multiple AIDs Like GFXHUB, set MMHUB0 bitmask for each AID. Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 681bc9d354fe..59be0c0293c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1730,7 +1730,7 @@ static int gmc_v9_0_sw_init(void *handle) break; case IP_VERSION(9, 4, 3): bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), adev->gfx.num_xcd); - bitmap_set(adev->vmhubs_mask, AMDGPU_MMHUB0(0), 1); + bitmap_set(adev->vmhubs_mask, AMDGPU_MMHUB0(0), adev->num_aid); amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; From feb36dd01403689140a42f906fc75769d3158400 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 18 Mar 2022 16:46:04 +0800 Subject: [PATCH 294/861] drm/amdgpu: convert the doorbell_index to 2 dwords offset for kiq KIQ doorbell_index is non-zero from XCC1, thus need to left-shift it like other rings. 
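The unit conversion at work here: entries in the doorbell assignment table count 64-bit doorbell slots, while ring->doorbell_index is a dword offset into the doorbell BAR, so every table index must be shifted left by one before use. A hypothetical helper (illustrative only, not part of this patch) makes that explicit:

	static inline u32 doorbell_slot_to_dword_offset(u32 slot)
	{
		return slot << 1; /* one 64-bit doorbell spans two dwords */
	}

With xcc_id == 0 the hunk below reduces to the old adev->doorbell_index.kiq value, now expressed in dword units.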
Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 682c157f2d8f..49b18cf987aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -313,14 +313,13 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->adev = NULL; ring->ring_obj = NULL; ring->use_doorbell = true; - ring->doorbell_index = adev->doorbell_index.kiq; ring->xcc_id = xcc_id; ring->vm_hub = AMDGPU_GFXHUB(xcc_id); if (xcc_id >= 1) - ring->doorbell_index = adev->doorbell_index.xcc1_kiq_start + - xcc_id - 1; + ring->doorbell_index = (adev->doorbell_index.xcc1_kiq_start + + xcc_id - 1) << 1; else - ring->doorbell_index = adev->doorbell_index.kiq; + ring->doorbell_index = adev->doorbell_index.kiq << 1; r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id); if (r) From 36be0181eab50abbb043a087988e6c2bef59dd45 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 20 Apr 2022 23:25:48 +0800 Subject: [PATCH 295/861] drm/amdgpu: program GRBM_MCM_ADDR for non-AID0 GRBM Otherwise the EOP interrupt on non-AID0 cannot route to IH0. Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 1dcb69b4816f..a9fab8de29e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -184,7 +184,10 @@ static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev) static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) { + int i; + for (i = 2; i < adev->gfx.num_xcd; i++) + WREG32_SOC15(GC, i, regGRBM_MCM_ADDR, 0x4); } static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, From 20bedf1379b1d4b060e3f43661f35e5447d0cfed Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 25 Apr 2022 22:19:58 +0800 Subject: [PATCH 296/861] drm/amdgpu: introduce new doorbell assignment table for GC 9.4.3 Four basic reasons for the change: 1. the number of rings grows a lot on GC 9.4.3, and adjusting the old assignment cannot keep each ring in a continuous doorbell space. 2. the SDMA doorbell index should not exceed 0x1FF on SDMA 4.2.2 due to the regDOORBELLx_CTRL_ENTRY.BIF_DOORBELLx_RANGE_OFFSET_ENTRY field width. 3. re-designing the doorbell assignment and unifying the calculation as "start + ring/inst id" makes the code much more concise. 4. defining only the START/END markers keeps the table simple. v2: (Lijo) 1. replace name 2.
use num_inst_per_aid/sdma_doorbell_range instead of hardcoding Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 32 +++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 6 +--- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 8 +---- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 19 ++---------- 4 files changed, 33 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index b036d2f01930..a29a018ec84e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -59,7 +59,7 @@ struct amdgpu_doorbell_index { uint32_t gfx_ring1; uint32_t gfx_userqueue_start; uint32_t gfx_userqueue_end; - uint32_t sdma_engine[8]; + uint32_t sdma_engine[16]; uint32_t mes_ring0; uint32_t mes_ring1; uint32_t ih; @@ -83,9 +83,6 @@ struct amdgpu_doorbell_index { }; uint32_t first_non_cp; uint32_t last_non_cp; - uint32_t xcc1_kiq_start; - uint32_t xcc1_mec_ring0_start; - uint32_t aid1_sdma_start; uint32_t max_assignment; /* Per engine SDMA doorbell size in dword */ uint32_t sdma_doorbell_range; @@ -312,6 +309,33 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT AMDGPU_DOORBELL64_INVALID = 0xFFFF } AMDGPU_DOORBELL64_ASSIGNMENT; +typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 +{ + /* KIQ: 0~7 for maximum 8 XCD */ + AMDGPU_DOORBELL_LAYOUT1_KIQ_START = 0x000, + AMDGPU_DOORBELL_LAYOUT1_HIQ = 0x008, + AMDGPU_DOORBELL_LAYOUT1_DIQ = 0x009, + /* Compute: 0x0A ~ 0x49 */ + AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START = 0x00A, + AMDGPU_DOORBELL_LAYOUT1_MEC_RING_END = 0x049, + AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START = 0x04A, + AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END = 0x0C9, + /* SDMA: 0x100 ~ 0x19F */ + AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START = 0x100, + AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F, + /* IH: 0x1A0 ~ 0x1AF */ + AMDGPU_DOORBELL_LAYOUT1_IH = 0x1A0, + /* VCN: 0x1B0 ~ 0x1C2 */ + AMDGPU_DOORBELL_LAYOUT1_VCN_START = 0x1B0, + AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1C2, + + AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START, + AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END, + + AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1C2, + AMDGPU_DOORBELL_LAYOUT1_INVALID = 0xFFFF +} AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1; + u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 49b18cf987aa..76438f197de1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -315,11 +315,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->use_doorbell = true; ring->xcc_id = xcc_id; ring->vm_hub = AMDGPU_GFXHUB(xcc_id); - if (xcc_id >= 1) - ring->doorbell_index = (adev->doorbell_index.xcc1_kiq_start + - xcc_id - 1) << 1; - else - ring->doorbell_index = adev->doorbell_index.kiq << 1; + ring->doorbell_index = (adev->doorbell_index.kiq + xcc_id) << 1; r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index a9fab8de29e8..52185b1d5d31 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -817,13 +817,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device 
*adev, int ring_id, ring->ring_obj = NULL; ring->use_doorbell = true; - if (xcc_id >= 1) - ring->doorbell_index = - (adev->doorbell_index.xcc1_mec_ring0_start + - ring_id - adev->gfx.num_compute_rings) << 1; - else - ring->doorbell_index = - (adev->doorbell_index.mec_ring0 + ring_id) << 1; + ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); ring->vm_hub = AMDGPU_GFXHUB(xcc_id); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index d3c7f9a43ef1..9b53174925f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1310,14 +1310,7 @@ static int sdma_v4_4_2_sw_init(void *handle) ring->use_doorbell?"true":"false"); /* doorbell size is 2 dwords, get DWORD offset */ - if (aid_id > 0) - ring->doorbell_index = - (adev->doorbell_index.aid1_sdma_start << 1) - + adev->doorbell_index.sdma_doorbell_range - * (i - adev->sdma.num_inst_per_aid); - else - ring->doorbell_index = - adev->doorbell_index.sdma_engine[i] << 1; + ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; ring->vm_hub = AMDGPU_MMHUB0(aid_id); sprintf(ring->name, "sdma%d.%d", aid_id, @@ -1336,14 +1329,8 @@ static int sdma_v4_4_2_sw_init(void *handle) /* doorbell index of page queue is assigned right after * gfx queue on the same instance */ - if (aid_id > 0) - ring->doorbell_index = - ((adev->doorbell_index.aid1_sdma_start + 1) << 1) - + adev->doorbell_index.sdma_doorbell_range - * (i - adev->sdma.num_inst_per_aid); - else - ring->doorbell_index = - (adev->doorbell_index.sdma_engine[i] + 1) << 1; + ring->doorbell_index = + (adev->doorbell_index.sdma_engine[i] + 1) << 1; ring->vm_hub = AMDGPU_MMHUB0(aid_id); sprintf(ring->name, "page%d.%d", aid_id, From 92085240ef9c0ec60c27a60b3cc0d4f5266fa511 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 3 May 2022 10:16:46 -0400 Subject: [PATCH 297/861] drm/amdkfd: add gpu compute cores io links for gfx9.4.3 The PSP TA will only provide xGMI topology info for links between GPU sockets so links between partitions from different sockets will be hardcoded as 3 xGMI hops with 1 hops weighted as xGMI and 2 hops weighted with a new intra-socket weight to indicate the longest possible distance. If the link between a partition and the CPU is non-PCIe, then assume the CPU (CCDs) is located within the same socket as the partition and represent the link as an intra-socket weighted single hop XGMI link with memory bandwidth. Links between partitions within a single socket will be abstracted as single hop xGMI links weighted with the new intra-socket weight and will have memory bandwidth. Finally, use the unused function bits in the location ID to represent the coordinates of the compute partition within its socket. A follow on patch will resolve the requirement for GPU socket xGMI link representation sometime later. 
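For reference, the weight arithmetic this implies works out as follows, using the constants defined in the patch below (KFD_CRAT_INTRA_SOCKET_WEIGHT = 13, KFD_CRAT_XGMI_WEIGHT = 15):

	/* partitions on the same socket: one abstracted xGMI hop */
	weight = KFD_CRAT_INTRA_SOCKET_WEIGHT;			/* 13 */
	/* partitions on different sockets: 1 xGMI + 2 intra-socket hops */
	weight = KFD_CRAT_XGMI_WEIGHT +
		 2 * KFD_CRAT_INTRA_SOCKET_WEIGHT;		/* 41 */
	/* non-PCIe CPU link: single intra-socket hop at memory bandwidth */
	weight = KFD_CRAT_INTRA_SOCKET_WEIGHT;			/* 13, 819200 MB/s */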
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 49 ++++++++++++++++------- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 ++++ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 ++ 4 files changed, 47 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index f5aebba31e88..dc93a67257e1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1166,7 +1166,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS) props->weight = 20; else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI) - props->weight = 15 * iolink->num_hops_xgmi; + props->weight = iolink->weight_xgmi; else props->weight = node_distance(id_from, id_to); @@ -1972,6 +1972,9 @@ static void kfd_find_numa_node_in_srat(struct kfd_node *kdev) } #endif +#define KFD_CRAT_INTRA_SOCKET_WEIGHT 13 +#define KFD_CRAT_XGMI_WEIGHT 15 + /* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU * to its NUMA node * @avail_size: Available size in the memory @@ -2003,6 +2006,12 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, * TODO: Fill-in other fields of iolink subtype */ if (kdev->adev->gmc.xgmi.connected_to_cpu) { + bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3); + int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT : + KFD_CRAT_INTRA_SOCKET_WEIGHT; + uint32_t bandwidth = ext_cpu ? amdgpu_amdkfd_get_xgmi_bandwidth_mbytes( + kdev->adev, NULL, true) : mem_bw; + /* * with host gpu xgmi link, host can access gpu memory whether * or not pcie bar type is large, so always create bidirectional @@ -2010,14 +2019,9 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, */ sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL; sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; - sub_type_hdr->num_hops_xgmi = 1; - if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) { - sub_type_hdr->minimum_bandwidth_mbs = - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes( - kdev->adev, NULL, true); - sub_type_hdr->maximum_bandwidth_mbs = - sub_type_hdr->minimum_bandwidth_mbs; - } + sub_type_hdr->weight_xgmi = weight; + sub_type_hdr->minimum_bandwidth_mbs = bandwidth; + sub_type_hdr->maximum_bandwidth_mbs = bandwidth; } else { sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS; sub_type_hdr->minimum_bandwidth_mbs = @@ -2050,6 +2054,8 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, uint32_t proximity_domain_from, uint32_t proximity_domain_to) { + bool use_ta_info = kdev->kfd->num_nodes == 1; + *avail_size -= sizeof(struct crat_subtype_iolink); if (*avail_size < 0) return -ENOMEM; @@ -2064,12 +2070,25 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; sub_type_hdr->proximity_domain_from = proximity_domain_from; sub_type_hdr->proximity_domain_to = proximity_domain_to; - sub_type_hdr->num_hops_xgmi = - amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev); - sub_type_hdr->maximum_bandwidth_mbs = - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false); - sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ? 
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0; + + if (use_ta_info) { + sub_type_hdr->weight_xgmi = KFD_CRAT_XGMI_WEIGHT * + amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev); + sub_type_hdr->maximum_bandwidth_mbs = + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, + peer_kdev->adev, false); + sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ? + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0; + } else { + bool is_single_hop = kdev->kfd == peer_kdev->kfd; + int weight = is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT : + (2 * KFD_CRAT_INTRA_SOCKET_WEIGHT) + KFD_CRAT_XGMI_WEIGHT; + int mem_bw = 819200; + + sub_type_hdr->weight_xgmi = weight; + sub_type_hdr->maximum_bandwidth_mbs = is_single_hop ? mem_bw : 0; + sub_type_hdr->minimum_bandwidth_mbs = is_single_hop ? mem_bw : 0; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index 3d0e533b93b9..fc719389b5d6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -275,7 +275,7 @@ struct crat_subtype_iolink { uint32_t maximum_bandwidth_mbs; uint32_t recommended_transfer_size; uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH - 1]; - uint8_t num_hops_xgmi; + uint8_t weight_xgmi; }; /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index ec5f85ff34e5..971a3aa3294a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -702,6 +702,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (amdgpu_use_xgmi_p2p) kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; + /* + * For GFX9.4.3, the KFD abstracts all partitions within a socket as + * xGMI connected in the topology so assign a unique hive id per + * device based on the pci device location if device is in PCIe mode. + */ + if (!kfd->hive_id && (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) && kfd->num_nodes > 1) + kfd->hive_id = pci_dev_id(kfd->adev->pdev); + kfd->noretry = kfd->adev->gmc.noretry; /* If CRAT is broken, won't set iommu enabled */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 6d958bf0fe90..d3e70341dfad 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1926,7 +1926,11 @@ int kfd_topology_add_device(struct kfd_node *gpu) dev->node_props.capability |= ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) & HSA_CAP_ASIC_REVISION_MASK); + dev->node_props.location_id = pci_dev_id(gpu->adev->pdev); + if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3)) + dev->node_props.location_id |= dev->gpu->node_id; + dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus); dev->node_props.max_engine_clk_fcompute = amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev); From 643e40d4c06f8c887af1789c7bf8d279e9c8e4cf Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 31 May 2022 16:25:16 -0400 Subject: [PATCH 298/861] drm/amdkfd: Fix SDMA in CPX mode When creating a user-mode SDMA queue, CP FW expects the driver to use/set the virtual SDMA engine id in the MAP_QUEUES packet instead of the physical SDMA engine id. Each partition node's virtual SDMA number should start from 0. However, when allocating a doorbell for the queue, KFD needs to allocate the doorbell from the doorbell space corresponding to the physical SDMA engine id, otherwise the hardware will not see the doorbell press.
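A sketch of the resulting split (identifiers follow the hunks below): the virtual engine id stays in the queue properties, where CP FW consumes it via MAP_QUEUES, and only the doorbell lookup re-derives the physical engine:

	/* virtual id, as programmed into the MAP_QUEUES packet */
	q->properties.sdma_engine_id =
		q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);

	/* physical id, used solely to index the doorbell space */
	phys = dqm->dev->node_id * get_num_all_sdma_engines(dqm) +
	       q->properties.sdma_engine_id;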
Signed-off-by: Mukul Joshi Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 69419a53a14e..2b5c4b2dd242 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -363,7 +363,16 @@ static int allocate_doorbell(struct qcm_process_device *qpd, */ uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx; - uint32_t valid_id = idx_offset[q->properties.sdma_engine_id] + + /* + * q->properties.sdma_engine_id corresponds to the virtual + * sdma engine number. However, for doorbell allocation, + * we need the physical sdma engine id in order to get the + * correct doorbell offset. + */ + uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id * + get_num_all_sdma_engines(qpd->dqm) + + q->properties.sdma_engine_id] + (q->properties.sdma_queue_id & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET + (q->properties.sdma_queue_id >> 1); @@ -1388,7 +1397,6 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, } q->properties.sdma_engine_id = - dqm->dev->node_id * get_num_all_sdma_engines(dqm) + q->sdma_id % kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / kfd_get_num_sdma_engines(dqm->dev); @@ -1418,7 +1426,6 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * PCIe-optimized ones */ q->properties.sdma_engine_id = - dqm->dev->node_id * get_num_all_sdma_engines(dqm) + kfd_get_num_sdma_engines(dqm->dev) + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / @@ -2486,6 +2493,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) int pipe, queue; int r = 0, xcc; uint32_t inst; + uint32_t sdma_engine_start; if (!dqm->sched_running) { seq_puts(m, " Device is stopped\n"); @@ -2530,7 +2538,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) } } - for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { + sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm); + for (pipe = sdma_engine_start; + pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm)); + pipe++) { for (queue = 0; queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; queue++) { From 1bd6dd21fcd53ac78a9018b96699ef1aa99a3e59 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 31 May 2022 16:31:28 -0400 Subject: [PATCH 299/861] drm/amdkfd: Add SDMA info for SDMA 4.4.2 Update SDMA queue information for SDMA 4.4.2. 
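For context, a sketch of the surrounding switch (the queue count shown is an assumption about the neighbouring cases, not part of this diff): kfd_device_info_set_sdma_info() keys per-engine queue counts off the SDMA IP version, and the new case folds SDMA 4.4.2 into the existing 4.x/5.x family:

	switch (sdma_version) {
	case IP_VERSION(4, 4, 2):
		/* assumed: grouped with the other 4.x/5.x parts */
		kfd->device_info.num_sdma_queues_per_engine = 2;
		break;
	}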
Signed-off-by: Mukul Joshi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 971a3aa3294a..8e5d785b8824 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -82,6 +82,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) case IP_VERSION(4, 2, 0):/* VEGA20 */ case IP_VERSION(4, 2, 2):/* ARCTURUS */ case IP_VERSION(4, 4, 0):/* ALDEBARAN */ + case IP_VERSION(4, 4, 2): case IP_VERSION(5, 0, 0):/* NAVI10 */ case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ case IP_VERSION(5, 0, 2):/* NAVI14 */ From d1d22df174ae512c57374f517e346b608f61555c Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 7 Jun 2022 14:46:18 -0400 Subject: [PATCH 300/861] drm/amdkfd: Populate memory info before adding GPU node to topology The local memory info needs to be fetched before the GPU node is added to the topology. Without this, the sysfs is incorrectly populated and the size is reported as 0. This was causing ROCr tests to fail; the issue was introduced by a bad merge. Signed-off-by: Mukul Joshi Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 8e5d785b8824..829e32433faf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -743,6 +743,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->max_proc_per_quantum = max_proc_per_quantum; atomic_set(&node->sram_ecc_flag, 0); + amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); + dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); for (i = 0; i < kfd->num_nodes; i++) { @@ -793,8 +795,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); - kfd->init_complete = true; dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); From cd8d77f328c53aad8915c9c4d64cf557742bb257 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 27 May 2022 13:47:24 +0800 Subject: [PATCH 301/861] drm/amdgpu: add new vram type for dgpu HBM3 will be supported in some dGPU programs. Signed-off-by: Hawking Zhang Reviewed-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 1 + drivers/gpu/drm/amd/include/atomfirmware.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index ac6fe0ae4609..ef4b9a41f20a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -272,6 +272,7 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev, break; case ATOM_DGPU_VRAM_TYPE_HBM2: case ATOM_DGPU_VRAM_TYPE_HBM2E: + case ATOM_DGPU_VRAM_TYPE_HBM3: vram_type = AMDGPU_VRAM_TYPE_HBM; break; case ATOM_DGPU_VRAM_TYPE_GDDR6: diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index bbe1337a8cee..e68c1e280322 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -182,6 +182,7 @@ enum atom_dgpu_vram_type {
ATOM_DGPU_VRAM_TYPE_HBM2 = 0x60, ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61, ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70, + ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80, }; enum atom_dp_vs_preemph_def{ From 9eb7681f760c77adece36bc62953245c9f44a3be Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Mon, 21 Feb 2022 15:38:39 +0800 Subject: [PATCH 302/861] drm/amdgpu: add the support of XGMI link for GC 9.4.3 Add the xgmi LFB_CNTL/LFB_SIZE reg addresses to fetch the xgmi info from. v2: move get_xgmi_info() to GC_V9_4_3 specific source files to utilize the register definitions specific for GC_V9_4_3 v3: remove the duplicated register definitions v4: enable xgmi based on asic_type as XGMI_IP ver is not available yet for IP discovery Signed-off-by: Shiwu Zhang Reviewed-by: Le Ma Acked-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 41 +++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 ++++-- 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index d3424ce97aa8..1bb17d95f720 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -528,6 +528,45 @@ static void gfxhub_v1_2_init(struct amdgpu_device *adev) } } +static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev) +{ + u32 max_num_physical_nodes; + u32 max_physical_node_id; + u32 xgmi_lfb_cntl; + u32 max_region; + u64 seg_size; + + xgmi_lfb_cntl = RREG32_SOC15(GC, 0, regMC_VM_XGMI_LFB_CNTL); + seg_size = REG_GET_FIELD( + RREG32_SOC15(GC, 0, regMC_VM_XGMI_LFB_SIZE), + MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + max_region = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); + + + + max_num_physical_nodes = 8; + max_physical_node_id = 7; + + /* PF_MAX_REGION=0 means xgmi is disabled */ + if (max_region || adev->gmc.xgmi.connected_to_cpu) { + adev->gmc.xgmi.num_physical_nodes = max_region + 1; + + if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes) + return -EINVAL; + + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, + PF_LFB_REGION); + + if (adev->gmc.xgmi.physical_node_id > max_physical_node_id) + return -EINVAL; + + adev->gmc.xgmi.node_segment_size = seg_size; + } + + return 0; +} const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset, @@ -536,5 +575,5 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .gart_disable = gfxhub_v1_2_gart_disable, .set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default, .init = gfxhub_v1_2_init, - .get_xgmi_info = gfxhub_v1_1_get_xgmi_info, + .get_xgmi_info = gfxhub_v1_2_get_xgmi_info, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 59be0c0293c4..4b2c4ecd7253 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1419,9 +1419,13 @@ static int gmc_v9_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */ - if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS) + /* + * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined + * in their IP discovery tables + */ + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) adev->gmc.xgmi.supported = true; if
(adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { From 8078f1c610fdcdd8003e2c538fb04af41fa5c269 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 29 Jun 2022 11:41:53 +0530 Subject: [PATCH 303/861] drm/amdgpu: Change num_xcd to xcc_mask Instead of number of XCCs, keep a mask of XCCs for the exact XCCs available on the ASIC. XCC configuration could differ based on different ASIC configs. v2: Rename num_xcd to num_xcc (Hawking) Use smaller xcc_mask size, changed to u16 (Le) Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 21 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 133 +++++++++++++---------- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 67 +++++++----- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 10 +- 7 files changed, 141 insertions(+), 99 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 76438f197de1..069b259f384c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -209,12 +209,12 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe, adev->gfx.num_compute_rings); - int num_xcd = (adev->gfx.num_xcd > 1) ? adev->gfx.num_xcd : 1; + int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; if (multipipe_policy) { /* policy: make queues evenly cross all pipes on MEC1 only * for multiple xcc, just use the original policy for simplicity */ - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { for (i = 0; i < max_queues_per_mec; i++) { pipe = i % adev->gfx.mec.num_pipe_per_mec; queue = (i / adev->gfx.mec.num_pipe_per_mec) % @@ -226,13 +226,13 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) } } else { /* policy: amdgpu owns all queues in the given pipe */ - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { for (i = 0; i < max_queues_per_mec; ++i) set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap); } } - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); } @@ -1207,23 +1207,24 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); enum amdgpu_gfx_partition mode; - int ret; + int ret = 0, num_xcc; - if (adev->gfx.num_xcd % 2 != 0) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + if (num_xcc % 2 != 0) return -EINVAL; if (!strncasecmp("SPX", buf, strlen("SPX"))) { mode = AMDGPU_SPX_PARTITION_MODE; } else if (!strncasecmp("DPX", buf, strlen("DPX"))) { - if (adev->gfx.num_xcd != 4 || adev->gfx.num_xcd != 8) + if (num_xcc != 4 || num_xcc != 8) return -EINVAL; mode = AMDGPU_DPX_PARTITION_MODE; } else if (!strncasecmp("TPX", buf, strlen("TPX"))) { - if (adev->gfx.num_xcd != 6) + if (num_xcc != 6) return -EINVAL; mode = AMDGPU_TPX_PARTITION_MODE; } else if (!strncasecmp("QPX", buf, strlen("QPX"))) { - if (adev->gfx.num_xcd != 8) + if (num_xcc != 8) return -EINVAL; mode = AMDGPU_QPX_PARTITION_MODE; } else if (!strncasecmp("CPX", buf, strlen("CPX"))) { @@ -1253,7 +1254,7 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct 
device *dev, char *supported_partition; /* TBD */ - switch (adev->gfx.num_xcd) { + switch (NUM_XCC(adev->gfx.xcc_mask)) { case 8: supported_partition = "SPX, DPX, QPX, CPX"; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8df36527aee9..93f9875154db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -64,6 +64,8 @@ enum amdgpu_gfx_partition { AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, }; +#define NUM_XCC(x) hweight16(x) + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -396,7 +398,7 @@ struct amdgpu_gfx { bool cp_gfx_shadow; /* for gfx11 */ enum amdgpu_gfx_partition partition_mode; - uint32_t num_xcd; + uint16_t xcc_mask; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 91814dc083c9..da69177dc76f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4536,7 +4536,7 @@ static int gfx_v9_0_early_init(void *handle) adev->gfx.num_gfx_rings = 0; else adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; - adev->gfx.num_xcd = 1; + adev->gfx.xcc_mask = 1; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_0_set_kiq_pm4_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 52185b1d5d31..c776fc5884de 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -177,16 +177,19 @@ static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = { static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - int i; - for (i = 0; i < adev->gfx.num_xcd; i++) + int i, num_xcc; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs; } static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 2; i < adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 2; i < num_xcc; i++) WREG32_SOC15(GC, i, regGRBM_MCM_ADDR, 0x4); } @@ -499,7 +502,7 @@ static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev) static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) { - int r, i; + int r, i, num_xcc; u32 *hpd; const __le32 *fw_data; unsigned fw_size; @@ -508,7 +511,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr; - for (i = 0; i < adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); @@ -683,23 +687,24 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode) { u32 tmp = 0; - int num_xcc_per_partition, i; + int num_xcc_per_partition, i, num_xcc; if (mode == adev->gfx.partition_mode) return mode; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd; + num_xcc_per_partition = num_xcc; break; case AMDGPU_DPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 2; + num_xcc_per_partition = num_xcc / 2; break; case AMDGPU_TPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 3; + num_xcc_per_partition = num_xcc / 3; break; case AMDGPU_QPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 4; + num_xcc_per_partition 
= num_xcc / 4; break; case AMDGPU_CPX_PARTITION_MODE: num_xcc_per_partition = 1; @@ -712,7 +717,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, * Stop user queues and threads, and make sure GPU is empty of work. */ - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, num_xcc_per_partition); tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, @@ -836,7 +841,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v9_4_3_sw_init(void *handle) { - int i, j, k, r, ring_id, xcc_id; + int i, j, k, r, ring_id, xcc_id, num_xcc; struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -844,6 +849,8 @@ static int gfx_v9_4_3_sw_init(void *handle) adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + /* EOP Event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); if (r) @@ -877,8 +884,7 @@ static int gfx_v9_4_3_sw_init(void *handle) /* set up the compute queues - allocate horizontally across pipes */ ring_id = 0; - for (xcc_id = 0; xcc_id < adev->gfx.num_xcd; xcc_id++) { - + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; @@ -930,14 +936,14 @@ static int gfx_v9_4_3_sw_init(void *handle) static int gfx_v9_4_3_sw_fini(void *handle) { - int i; + int i, num_xcc; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - for (i = 0; i < adev->gfx.num_compute_rings * - adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { amdgpu_gfx_mqd_sw_fini(adev, i); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring); amdgpu_gfx_kiq_fini(adev, i); @@ -1050,9 +1056,10 @@ static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) { u32 tmp; - int i, j; + int i, j, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, GRBM_CNTL, READ_TIMEOUT, 0xff); gfx_v9_4_3_setup_rb(adev, i); } @@ -1064,7 +1071,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { - for (j = 0; j < adev->gfx.num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { soc15_grbm_select(adev, 0, 0, 0, i, j); /* CP and shaders */ if (i == 0) { @@ -1092,7 +1099,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { gfx_v9_4_3_init_compute_vmid(adev, i); gfx_v9_4_3_init_gds_vmid(adev, i); } @@ -1150,8 +1157,10 @@ static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) { uint32_t tmp = 0; + int num_xcc; - switch (adev->gfx.num_xcd) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + switch (num_xcc) { /* directly config VIRTUAL_XCC_ID to 0 
for 1-XCC */ case 1: WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, 0x8); @@ -1288,9 +1297,10 @@ static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); gfx_v9_4_3_wait_for_rlc_serdes(adev, i); @@ -1299,9 +1309,10 @@ static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); @@ -1314,9 +1325,10 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) #ifdef AMDGPU_RLC_DEBUG_RETRY u32 rlc_ucode_ver; #endif - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 1); udelay(50); @@ -1377,11 +1389,12 @@ static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) { - int r, i; + int r, i, num_xcc; adev->gfx.rlc.funcs->stop(adev); - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* disable CG */ WREG32_SOC15(GC, i, regRLC_CGCG_CGLS_CTRL, 0); @@ -1954,10 +1967,11 @@ done: static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { - int r, i, j; + int r, i, j, num_xcc; struct amdgpu_ring *ring; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -2021,12 +2035,13 @@ static int gfx_v9_4_3_hw_init(void *handle) static int gfx_v9_4_3_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (amdgpu_gfx_disable_kcq(adev, i)) DRM_ERROR("XCD %d KCQ disable failed\n", i); @@ -2069,9 +2084,10 @@ static int gfx_v9_4_3_resume(void *handle) static bool gfx_v9_4_3_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (REG_GET_FIELD(RREG32_SOC15(GC, i, regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) return false; @@ -2183,30 +2199,30 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, static int gfx_v9_4_3_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int num_xcc; - /* hardcode in emulation phase */ - adev->gfx.num_xcd = 1; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); adev->gfx.partition_mode = amdgpu_user_partt_mode; /* calculate the num_xcc_in_xcp for the partition mode*/ switch (amdgpu_user_partt_mode) { case AMDGPU_SPX_PARTITION_MODE: - 
adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd; + adev->gfx.num_xcc_per_xcp = num_xcc; break; case AMDGPU_DPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 2; + adev->gfx.num_xcc_per_xcp = num_xcc / 2; break; case AMDGPU_TPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 3; + adev->gfx.num_xcc_per_xcp = num_xcc / 3; break; case AMDGPU_QPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 4; + adev->gfx.num_xcc_per_xcp = num_xcc / 4; break; case AMDGPU_CPX_PARTITION_MODE: adev->gfx.num_xcc_per_xcp = 1; break; default: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd; + adev->gfx.num_xcc_per_xcp = num_xcc; break; } @@ -2404,14 +2420,15 @@ static int gfx_v9_4_3_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; if (amdgpu_sriov_vf(adev)) return 0; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 3): - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) gfx_v9_4_3_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE, i); break; @@ -2739,12 +2756,13 @@ static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; + int i, num_xcc; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); @@ -2761,12 +2779,13 @@ static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; + int i, num_xcc; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); @@ -2783,8 +2802,10 @@ static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; - for (i = 0; i < adev->gfx.num_xcd; i++) { + int i, num_xcc; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { switch (type) { case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state, i); @@ -2842,6 +2863,7 @@ static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, /* Per-queue interrupt is supported for MEC starting from VI. * The interrupt can only be enabled/disabled per pipe instead of per queue. 
*/ + if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) amdgpu_fence_process(ring); } @@ -3056,9 +3078,10 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) { - int i, j; + int i, j, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; for (j = 0; j < adev->gfx.num_compute_rings; j++) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 1bb17d95f720..e35365ab3f1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -43,9 +43,10 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint64_t page_table_base) { struct amdgpu_vmhub *hub; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; WREG32_SOC15_OFFSET(GC, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -56,13 +57,14 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); + } } static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) { uint64_t pt_base; - int i; + int i, num_xcc; if (adev->gmc.pdb0_bo) pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); @@ -74,7 +76,8 @@ static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) /* If use GART for FB translation, vmid0 page table covers both * vram and system memory (gart) */ - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (adev->gmc.pdb0_bo) { WREG32_SOC15(GC, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -111,9 +114,10 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Program the AGP BAR */ WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BASE, 0); WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); @@ -177,9 +181,10 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Setup TLB control */ tmp = RREG32_SOC15(GC, i, regMC_VM_MX_L1_TLB_CNTL); @@ -202,9 +207,10 @@ static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Setup L2 cache */ tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -249,9 +255,10 @@ static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { tmp = 
RREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, @@ -266,9 +273,10 @@ static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_SOC15(GC, i, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0XFFFFFFFF); @@ -295,7 +303,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) struct amdgpu_vmhub *hub; unsigned num_level, block_size; uint32_t tmp; - int i, j; + int i, j, num_xcc; num_level = adev->vm_manager.num_level; block_size = adev->vm_manager.block_size; @@ -304,7 +312,8 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) else block_size -= 9; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i); @@ -362,10 +371,12 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - unsigned i, j; + unsigned i, j, num_xcc; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; + for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, i * hub->eng_addr_distance, 0xffffffff); @@ -377,9 +388,10 @@ static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (amdgpu_sriov_vf(adev)) { /* * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are @@ -413,9 +425,10 @@ static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; u32 tmp; - u32 i, j; + u32 i, j, num_xcc; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; /* Disable all tables */ for (i = 0; i < 16; i++) @@ -449,9 +462,10 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -490,9 +504,10 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_2_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; hub->ctx0_ptb_addr_lo32 = diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 4b2c4ecd7253..2c322a25bf1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1733,7 +1733,8 @@ 
static int gmc_v9_0_sw_init(void *handle) adev->gmc.translate_further = adev->vm_manager.num_level > 1; break; case IP_VERSION(9, 4, 3): - bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), adev->gfx.num_xcd); + bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), + NUM_XCC(adev->gfx.xcc_mask)); bitmap_set(adev->vmhubs_mask, AMDGPU_MMHUB0(0), adev->num_aid); amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 829e32433faf..df96c4c508a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -592,6 +592,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; + int num_xcd; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -601,16 +602,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - if (kfd->adev->gfx.num_xcd == 0 || kfd->adev->gfx.num_xcd == 1 || - kfd->adev->gfx.num_xcc_per_xcp == 0) + num_xcd = NUM_XCC(kfd->adev->gfx.xcc_mask); + if (num_xcd == 0 || num_xcd == 1 || kfd->adev->gfx.num_xcc_per_xcp == 0) kfd->num_nodes = 1; else - kfd->num_nodes = - kfd->adev->gfx.num_xcd/kfd->adev->gfx.num_xcc_per_xcp; + kfd->num_nodes = num_xcd / kfd->adev->gfx.num_xcc_per_xcp; if (kfd->num_nodes == 0) { dev_err(kfd_device, "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", - kfd->adev->gfx.num_xcd, kfd->adev->gfx.num_xcc_per_xcp); + num_xcd, kfd->adev->gfx.num_xcc_per_xcp); goto out; } From d244a4167a0276db054d588e11f8142942bcf529 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 2 Jul 2022 16:04:26 -0400 Subject: [PATCH 304/861] drm/amdgpu/vcn: use vcn4 irqsrc header for VCN 4.0.3 Use vcn4 irqsrc header for VCN 4.0.3. 
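The practical effect (a sketch mirroring the hunks below): both interrupt registration and dispatch now key off the single trap source defined in irqsrcs_vcn_4_0.h instead of the 2.0-era per-message id:

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_TRAP:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	}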
Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index ae2cc47d344a..fafce2beb6cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -35,7 +35,7 @@ #include "vcn/vcn_4_0_3_offset.h" #include "vcn/vcn_4_0_3_sh_mask.h" -#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" +#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h" #define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL #define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX @@ -104,7 +104,7 @@ static int vcn_v4_0_3_sw_init(void *handle) /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq); + VCN_4_0__SRCID__UVD_TRAP, &adev->vcn.inst->irq); if (r) return r; @@ -1380,7 +1380,7 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev, DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n"); switch (entry->src_id) { - case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: + case VCN_4_0__SRCID__UVD_TRAP: amdgpu_fence_process(&adev->vcn.inst->ring_dec); break; default: From f9f74df58faa4d6744acc6d91a81a86895bc7fe8 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 2 Jul 2022 16:41:16 -0400 Subject: [PATCH 305/861] drm/amdgpu/vcn: update vcn header to support multiple AIDs Add aid_id in vcn header to support multiple AIDs Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index c730949ece7d..1024a06359ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -242,6 +242,7 @@ struct amdgpu_vcn_inst { uint32_t *dpg_sram_curr_addr; atomic_t dpg_enc_submission_cnt; struct amdgpu_vcn_fw_shared fw_shared; + uint8_t aid_id; }; struct amdgpu_vcn_ras { @@ -271,6 +272,8 @@ struct amdgpu_vcn { struct ras_common_if *ras_if; struct amdgpu_vcn_ras *ras; + + uint8_t num_inst_per_aid; }; struct amdgpu_fw_shared_rb_ptrs_struct { From 492c464750587ea033d7a2fcaa21f902e345b383 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 2 Jul 2022 16:41:52 -0400 Subject: [PATCH 306/861] drm/amdgpu/jpeg: update jpeg header to support multiple AIDs Add aid_id in jpeg header to support multiple AIDs. 
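A sketch of how such fields are typically consumed (hypothetical usage; only the aid_id and num_inst_per_aid members come from these two patches): the AID hosting a given instance falls out of integer division, e.g. for JPEG:

	struct amdgpu_jpeg_inst *inst = &adev->jpeg.inst[i];

	/* which AID hosts instance i */
	inst->aid_id = i / adev->jpeg.num_inst_per_aid;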
Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index cb6c127ab81d..5c200a508fa3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -40,6 +40,7 @@ struct amdgpu_jpeg_inst { struct amdgpu_ring ring_dec[AMDGPU_MAX_JPEG_RINGS]; struct amdgpu_irq_src irq; struct amdgpu_jpeg_reg external; + uint8_t aid_id; }; struct amdgpu_jpeg_ras { @@ -58,6 +59,7 @@ struct amdgpu_jpeg { atomic_t total_submission_cnt; struct ras_common_if *ras_if; struct amdgpu_jpeg_ras *ras; + uint8_t num_inst_per_aid; }; int amdgpu_jpeg_sw_init(struct amdgpu_device *adev); From 53054e9a7775c228ada4d052f3e7849e71072811 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 2 Jul 2022 19:34:00 -0400 Subject: [PATCH 307/861] drm/amdgpu/vcn: update new doorbell map New doorbell map is used for VCN 4.0.3. Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 2 +- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 8914f3c6c80f..e12e3646c49a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -102,7 +102,7 @@ static int jpeg_v4_0_3_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec[i]; ring->use_doorbell = true; ring->vm_hub = AMDGPU_MMHUB0(0); - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + (i?8:1) + i; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; sprintf(ring->name, "jpeg_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 266b504fd83e..962627005961 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -161,7 +161,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do doorbell_range = REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, BIF_DOORBELL0_RANGE_SIZE_ENTRY, - 0x10); + 0x9); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, @@ -174,7 +174,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, - S2A_DOORBELL_PORT1_RANGE_SIZE, 0x10); + S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index fafce2beb6cf..ddd844cca02e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -110,7 +110,7 @@ static int vcn_v4_0_3_sw_init(void *handle) ring = &adev->vcn.inst->ring_dec; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 5; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1); ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -176,7 
+176,7 @@ static int vcn_v4_0_3_hw_init(void *handle) int r; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1), ring->me); + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); if (ring->use_doorbell) WREG32_SOC15(VCN, ring->me, regVCN_RB4_DB_CTRL, ring->doorbell_index << VCN_RB4_DB_CTRL__OFFSET__SHIFT | From 429a111851bc1f7388fe44af36166d399583a18f Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 2 Jul 2022 20:00:41 -0400 Subject: [PATCH 308/861] drm/amdgpu/vcn: update ucode setup Use common amdgpu_vcn_setup_ucode for ucode setup. Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index ddd844cca02e..5eaaac531ab6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -87,16 +87,7 @@ static int vcn_v4_0_3_sw_init(void *handle) if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - - DRM_DEV_INFO(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) From 7f0af02ab362df7f064df690fd59659a1cdb9b44 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 2 Jul 2022 20:03:18 -0400 Subject: [PATCH 309/861] drm/amdgpu/vcn: remove unused code Remove unused code. 
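For readers tracing the new VCN 4.0.3 doorbell map programmed by the jpeg_v4_0_3.c, nbio_v7_9.c and vcn_v4_0_3.c hunks above, the layout reduces to simple index math. A compile-standalone sketch; treating vcn_ring0_1 as a count of 64-bit doorbell slots (hence the shift by one) is an assumption based on the surrounding code:

    #include <stdint.h>

    /* Illustrative index math for the VCN 4.0.3 doorbell map: the VCN
     * decode ring takes the base slot and JPEG decode ring i takes slot
     * 1 + i, which together fit the 0x9-entry doorbell range programmed
     * in nbio_v7_9.
     */
    static uint32_t vcn_dec_doorbell(uint32_t vcn_ring0_1)
    {
            return vcn_ring0_1 << 1;
    }

    static uint32_t jpeg_dec_doorbell(uint32_t vcn_ring0_1, uint32_t i)
    {
            return (vcn_ring0_1 << 1) + 1 + i;
    }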
Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 121 ------------------------ 1 file changed, 121 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 5eaaac531ab6..93c18fd7de77 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -416,117 +416,6 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indir VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } -/** - * vcn_v4_0_disable_static_power_gating - disable VCN static power gating - * - * @adev: amdgpu_device pointer - * - * Disable static power gating for VCN block - */ -static void vcn_v4_0_3_disable_static_power_gating(struct amdgpu_device *adev) -{ - uint32_t data = 0; - - if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { - data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); - - WREG32_SOC15(VCN, 0, regUVD_PGFSM_CONFIG, data); - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_PGFSM_STATUS, - UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF); - } else { - data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT - | 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); - - WREG32_SOC15(VCN, 0, regUVD_PGFSM_CONFIG, data); - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_PGFSM_STATUS, 0, 0x3F3FFFFF); - } - - data = RREG32_SOC15(VCN, 0, regUVD_POWER_STATUS); - data &= ~0x103; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN) - data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | - UVD_POWER_STATUS__UVD_PG_EN_MASK; - - WREG32_SOC15(VCN, 0, regUVD_POWER_STATUS, data); -} - -/** - * vcn_v4_0_3_enable_static_power_gating - enable VCN static power gating - * - * @adev: amdgpu_device pointer - * - * Enable static power gating for VCN block - */ -static void vcn_v4_0_3_enable_static_power_gating(struct amdgpu_device *adev) -{ - uint32_t data; - - if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { - /* Before power off, this indicator has to be turned on */ - data = RREG32_SOC15(VCN, 0, regUVD_POWER_STATUS); - data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; - data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; - WREG32_SOC15(VCN, 0, regUVD_POWER_STATUS, data); - - data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT - | 
2 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT - | 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); - WREG32_SOC15(VCN, 0, regUVD_PGFSM_CONFIG, data); - - data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT - | 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT); - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_PGFSM_STATUS, data, 0x3F3FFFFF); - } -} - /** * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating * @@ -869,9 +758,6 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) return vcn_v4_0_3_start_dpg_mode(adev, adev->vcn.indirect_sram); - /* disable VCN power gating */ - vcn_v4_0_3_disable_static_power_gating(adev); - /* set VCN status busy */ tmp = RREG32_SOC15(VCN, 0, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; WREG32_SOC15(VCN, 0, regUVD_STATUS, tmp); @@ -1119,19 +1005,12 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev) tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; WREG32_SOC15(VCN, 0, regUVD_SOFT_RESET, tmp); - tmp = RREG32_SOC15(VCN, 0, regUVD_VCPU_CNTL); - tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; - WREG32_SOC15(VCN, 0, regUVD_SOFT_RESET, tmp); - /* clear VCN status */ WREG32_SOC15(VCN, 0, regUVD_STATUS, 0); /* apply HW clock gating */ vcn_v4_0_3_enable_clock_gating(adev); - /* enable VCN power gating */ - vcn_v4_0_3_enable_static_power_gating(adev); - Done: if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, false); From cf1f3737c2d336b1473596cc7356bdffa186bc71 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 11 Jul 2022 10:58:40 -0400 Subject: [PATCH 310/861] drm/amdgpu/vcn: update amdgpu_fw_shared to amdgpu_vcn4_fw_shared Use amdgpu_vcn4_fw_shared for vcn 4.0.3. 
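The hunks below only touch a small part of the new shared structure; for orientation, here is an illustrative subset. The field names (present_flag_0, sq.is_enabled, sq.queue_mode) are taken from the diff, while the layout and any other members are assumptions, not the real definition:

    #include <stdint.h>

    /* Illustrative subset of amdgpu_vcn4_fw_shared -- not the real
     * definition, which carries more members.
     */
    struct amdgpu_vcn4_fw_shared_sketch {
            uint32_t present_flag_0;       /* feature-present bits; cleared at sw_init */
            struct {
                    uint32_t is_enabled;   /* set to cpu_to_le32(true) at sw_init */
                    uint32_t queue_mode;   /* FW_QUEUE_RING_RESET / FW_QUEUE_DPG_HOLD_OFF bits */
            } sq;
    };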
Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 40 +++++++------------------ 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 93c18fd7de77..0b2b97593bac 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -79,7 +79,7 @@ static int vcn_v4_0_3_early_init(void *handle) static int vcn_v4_0_3_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - volatile struct amdgpu_fw_shared *fw_shared; + volatile struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; int r; @@ -111,10 +111,8 @@ static int vcn_v4_0_3_sw_init(void *handle) return r; fw_shared = adev->vcn.inst->fw_shared.cpu_addr; - fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) | - cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) | - cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB); - fw_shared->sw_ring.is_enabled = cpu_to_le32(true); + fw_shared->present_flag_0 = 0; + fw_shared->sq.is_enabled = cpu_to_le32(true); if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode; @@ -135,11 +133,11 @@ static int vcn_v4_0_3_sw_fini(void *handle) int r, idx; if (drm_dev_enter(&adev->ddev, &idx)) { - volatile struct amdgpu_fw_shared *fw_shared; + volatile struct amdgpu_vcn4_fw_shared *fw_shared; fw_shared = adev->vcn.inst->fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; - fw_shared->sw_ring.is_enabled = cpu_to_le32(false); + fw_shared->sq.is_enabled = cpu_to_le32(false); drm_dev_exit(idx); } @@ -304,7 +302,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev) upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr)); WREG32_SOC15(VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0, 0); WREG32_SOC15(VCN, 0, regUVD_VCPU_NONCACHE_SIZE0, - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); } /** @@ -407,7 +405,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indir VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_NONCACHE_SIZE0), - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect); /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( @@ -612,7 +610,7 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) */ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, bool indirect) { - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; @@ -702,7 +700,6 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, bool indirect) (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr)); ring = &adev->vcn.inst->ring_dec; - fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); /* program the RB_BASE for ring buffer */ WREG32_SOC15(VCN, 0, regUVD_RB_BASE_LO4, @@ -728,12 +725,8 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, bool indirect) WREG32_SOC15(VCN, 0, regUVD_SCRATCH2, 0); - /* Reset FW shared memory RBC WPTR/RPTR */ - fw_shared->rb.rptr = 0; - fw_shared->rb.wptr = lower_32_bits(ring->wptr); - /*resetting done, fw can check RB ring */ - 
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); return 0; } @@ -747,7 +740,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, bool indirect) */ static int vcn_v4_0_3_start(struct amdgpu_device *adev) { - volatile struct amdgpu_fw_shared *fw_shared; + volatile struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r; @@ -878,7 +871,6 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) ring = &adev->vcn.inst->ring_dec; fw_shared = adev->vcn.inst->fw_shared.cpu_addr; - fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); /* program the RB_BASE for ring buffer */ WREG32_SOC15(VCN, 0, regUVD_RB_BASE_LO4, @@ -902,8 +894,7 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); ring->wptr = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR4); - fw_shared->rb.wptr = cpu_to_le32(lower_32_bits(ring->wptr)); - fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + fw_shared->sq.queue_mode &= cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF)); return 0; } @@ -1075,15 +1066,6 @@ static uint64_t vcn_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring) static void vcn_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - volatile struct amdgpu_fw_shared *fw_shared; - - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { - /*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */ - fw_shared = adev->vcn.inst->fw_shared.cpu_addr; - fw_shared->rb.wptr = lower_32_bits(ring->wptr); - WREG32_SOC15(VCN, ring->me, regUVD_SCRATCH2, - lower_32_bits(ring->wptr)); - } if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); From 07bc768afb0a1c638f4eb054aaed9eaa291132de Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 9 Aug 2022 12:58:58 -0400 Subject: [PATCH 311/861] drm/amdgpu: reflect psp xgmi topology info for gfx9.4.3 Similar to GFX9.4.2 non-A+A devices, GFX9.4.3 psp xgmi topology info is half duplex and requires the driver to fill in the bidirectional info. 
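Since the TA only reports each link in one direction on these parts, the driver has to mirror the reported hop/link counts into the reverse direction. A minimal standalone sketch of that reflection; the types here are made up, and the real code does this via psp_xgmi_reflect_topology_info(), as the hunk below shows:

    #include <stdint.h>

    struct xgmi_node_sketch {
            uint64_t node_id;
            uint8_t num_hops;
            uint8_t num_links;
    };

    /* Copy the hop/link counts reported for a peer into the entry that
     * describes the opposite direction of the same link (sketch only).
     */
    static void reflect_link(struct xgmi_node_sketch *reverse_entry,
                             const struct xgmi_node_sketch *reported)
    {
            reverse_entry->num_hops = reported->num_hops;
            reverse_entry->num_links = reported->num_links;
    }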
Signed-off-by: Jonathan Kim Reviewed-by: Shiwu Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index aa37b703c718..dd7f6d688449 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1254,8 +1254,9 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) { - return psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && - psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b; + return (psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && + psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) || + psp->adev->ip_versions[MP0_HWIP][0] >= IP_VERSION(13, 0, 6); } /* @@ -1363,6 +1364,9 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, /* Invoke xgmi ta again to get the link information */ if (psp_xgmi_peer_link_info_supported(psp)) { struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output; + bool requires_reflection = + (psp->xgmi_context.supports_extended_data && get_extended_data) || + psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6); xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; @@ -1380,8 +1384,7 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, link_info_output->nodes[i].num_links; /* reflect the topology information for bi-directionality */ - if (psp->xgmi_context.supports_extended_data && - get_extended_data && topology->nodes[i].num_hops) + if (requires_reflection && topology->nodes[i].num_hops) psp_xgmi_reflect_topology_info(psp, topology->nodes[i]); } } From 1526ec9a3ed90e7ad36de7ef8aa2768b60af14df Mon Sep 17 00:00:00 2001 From: David Belanger Date: Wed, 16 Feb 2022 12:07:28 -0500 Subject: [PATCH 312/861] drm/amdkfd: EOP Removal - Handle size 0 correctly On GC 9.4.3, we are removing the EOP buffer. If we specify 0 for the size, CP_HQD_EOP_CONTROL ends up with an incorrect value, as the order_base_2 calculation does not handle 0. Fix it by using zero for the MQD entry for EOP size 0. v2: Reworked code with a conditional assignment and fixed style issues. Signed-off-by: David Belanger Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 5c9b3392758e..2e2a0f8586f7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -257,9 +257,14 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit * more than (EOP entry count - 1) so a queue size of 0x800 dwords * is safe, giving a maximum field value of 0xA. + * + * Also, do calculation only if EOP is used (size > 0), otherwise + * the order_base_2 calculation provides incorrect result. + * */ - m->cp_hqd_eop_control = min(0xA, - order_base_2(q->eop_ring_buffer_size / 4) - 1); + m->cp_hqd_eop_control = q->eop_ring_buffer_size ?
+ min(0xA, order_base_2(q->eop_ring_buffer_size / 4) - 1) : 0; + m->cp_hqd_eop_base_addr_lo = lower_32_bits(q->eop_ring_buffer_address >> 8); m->cp_hqd_eop_base_addr_hi = From 0c552ed38780f24b7ac235c3d10c6c94686ecfdf Mon Sep 17 00:00:00 2001 From: Le Ma Date: Sat, 2 Apr 2022 19:39:59 +0800 Subject: [PATCH 313/861] drm/amdgpu: add indirect r/w interface for smn address greater than 32bits On platforms with multiple AIDs, bits [34:32] of the SMN address are leveraged to access non-AID0 register SMN addresses, and a new PCIE_INDEX_HI register is introduced to program the higher bits. v2: rebase on latest register accessors (Alex) Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 98 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 1 + drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 6 ++ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 + 5 files changed, 118 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5f4396185a2e..729d5eedae49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -608,6 +608,9 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); +typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t); +typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t); + typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t); typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); @@ -798,6 +801,8 @@ struct amdgpu_device { amdgpu_wreg_t pcie_wreg; amdgpu_rreg_t pciep_rreg; amdgpu_wreg_t pciep_wreg; + amdgpu_rreg_ext_t pcie_rreg_ext; + amdgpu_wreg_ext_t pcie_wreg_ext; amdgpu_rreg64_t pcie_rreg64; amdgpu_wreg64_t pcie_wreg64; /* protects concurrent UVD register access */ @@ -1088,9 +1093,13 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, uint32_t expected_value, uint32_t mask); uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags); +u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, + u64 reg_addr); void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags); +void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, + u64 reg_addr, u32 reg_data); void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); @@ -1142,6 +1151,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v)) +#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg)) +#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v)) #define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg)) #define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v)) #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 38b6bbcaf186..c190365d67e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -707,6 +707,48 @@ u32
amdgpu_device_indirect_rreg(struct amdgpu_device *adev, return r; } +u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, + u64 reg_addr) +{ + unsigned long flags, pcie_index, pcie_index_hi, pcie_data; + u32 r; + void __iomem *pcie_index_offset; + void __iomem *pcie_index_hi_offset; + void __iomem *pcie_data_offset; + + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); + if (adev->nbio.funcs->get_pcie_index_hi_offset) + pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); + else + pcie_index_hi = 0; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; + pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; + if (pcie_index_hi != 0) + pcie_index_hi_offset = (void __iomem *)adev->rmmio + + pcie_index_hi * 4; + + writel(reg_addr, pcie_index_offset); + readl(pcie_index_offset); + if (pcie_index_hi != 0) { + writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + r = readl(pcie_data_offset); + + /* clear the high bits */ + if (pcie_index_hi != 0) { + writel(0, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + + return r; +} + /** * amdgpu_device_indirect_rreg64 - read a 64bits indirect register * @@ -774,6 +816,46 @@ void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); } +void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, + u64 reg_addr, u32 reg_data) +{ + unsigned long flags, pcie_index, pcie_index_hi, pcie_data; + void __iomem *pcie_index_offset; + void __iomem *pcie_index_hi_offset; + void __iomem *pcie_data_offset; + + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); + if (adev->nbio.funcs->get_pcie_index_hi_offset) + pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); + else + pcie_index_hi = 0; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; + pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; + if (pcie_index_hi != 0) + pcie_index_hi_offset = (void __iomem *)adev->rmmio + + pcie_index_hi * 4; + + writel(reg_addr, pcie_index_offset); + readl(pcie_index_offset); + if (pcie_index_hi != 0) { + writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + writel(reg_data, pcie_data_offset); + readl(pcie_data_offset); + + /* clear the high bits */ + if (pcie_index_hi != 0) { + writel(0, pcie_index_hi_offset); + readl(pcie_index_hi_offset); + } + + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + /** * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address * @@ -840,6 +922,13 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) return 0; } +static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg) +{ + DRM_ERROR("Invalid callback to read register 0x%llX\n", reg); + BUG(); + return 0; +} + /** * amdgpu_invalid_wreg - dummy reg write function * @@ -857,6 +946,13 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32 BUG(); } +static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v) +{ + DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n", + reg, v); + BUG(); +} + /** * amdgpu_invalid_rreg64 - dummy 64 
bit reg read function * @@ -3611,6 +3707,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->smc_wreg = &amdgpu_invalid_wreg; adev->pcie_rreg = &amdgpu_invalid_rreg; adev->pcie_wreg = &amdgpu_invalid_wreg; + adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext; + adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext; adev->pciep_rreg = &amdgpu_invalid_rreg; adev->pciep_wreg = &amdgpu_invalid_wreg; adev->pcie_rreg64 = &amdgpu_invalid_rreg64; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 6a636c34b717..8fa3a1f3b181 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -61,6 +61,7 @@ struct amdgpu_nbio_funcs { u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_index_hi_offset)(struct amdgpu_device *adev); u32 (*get_pcie_port_index_offset)(struct amdgpu_device *adev); u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev); u32 (*get_rev_id)(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 962627005961..92e9c5ed95df 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -319,6 +319,11 @@ static u32 nbio_v7_9_get_pcie_data_offset(struct amdgpu_device *adev) return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2); } +static u32 nbio_v7_9_get_pcie_index_hi_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2_HI); +} + const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg = { .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK, .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK, @@ -382,6 +387,7 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v7_9_get_pcie_index_offset, .get_pcie_data_offset = nbio_v7_9_get_pcie_data_offset, + .get_pcie_index_hi_offset = nbio_v7_9_get_pcie_index_hi_offset, .get_rev_id = nbio_v7_9_get_rev_id, .mc_access_enable = nbio_v7_9_mc_access_enable, .get_memsize = nbio_v7_9_get_memsize, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index b7e8af56df84..b9bcb12bff91 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -866,6 +866,8 @@ static int soc15_common_early_init(void *handle) adev->smc_wreg = NULL; adev->pcie_rreg = &amdgpu_device_indirect_rreg; adev->pcie_wreg = &amdgpu_device_indirect_wreg; + adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext; + adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext; adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg; From 6b22ef25748fb77030bac02e2147f6d738212ac3 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Sat, 2 Apr 2022 20:21:35 +0800 Subject: [PATCH 314/861] drm/amdgpu: configure the doorbell settings for sdma on non-AID0 Configure the sdma doorbell settings on NBIF0 and SYSHUB of each AID v2: fetch aid_id from amdgpu_sdma_instance (Lijo) Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 65 ++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c 
b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 92e9c5ed95df..5a5bab665f60 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -62,10 +62,23 @@ static u32 nbio_v7_9_get_memsize(struct amdgpu_device *adev) return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE); } +#define S2A_DOORBELL_REG_LSD_OFFSET 0x40 + +/* Temporarily add 2 macros below. Range is 0 ~ 3 as total AID number is 4. + * They will be obsoleted after the latest ip offset header + * is imported in driver in near future. + */ +#define AMDGPU_SMN_TARGET_AID(x) ((u64)(x) << 32) +#define AMDGPU_SMN_CROSS_AID (1ULL << 34) + static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instance, bool use_doorbell, int doorbell_index, int doorbell_size) { u32 doorbell_range = 0, doorbell_ctrl = 0; + int aid_id = adev->sdma.instance[instance].aid_id; + + if (use_doorbell == false) + return; doorbell_range = REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, @@ -80,9 +93,10 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_RANGE_SIZE, doorbell_size); - switch (instance) { + switch (instance % adev->sdma.num_inst_per_aid) { case 0: - WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1, doorbell_range); + WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1) + + 4 * aid_id, doorbell_range); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, @@ -94,10 +108,15 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x1); - WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL, doorbell_ctrl); + WREG32_PCIE_EXT((SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL) + + S2A_DOORBELL_REG_LSD_OFFSET) * 4 + + AMDGPU_SMN_TARGET_AID(aid_id) + + AMDGPU_SMN_CROSS_AID * !!aid_id, + doorbell_ctrl); break; case 1: - WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2, doorbell_range); + WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2) + + 4 * aid_id, doorbell_range); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, @@ -109,10 +128,15 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x2); - WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL, doorbell_ctrl); + WREG32_PCIE_EXT((SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL) + + S2A_DOORBELL_REG_LSD_OFFSET) * 4 + + AMDGPU_SMN_TARGET_AID(aid_id) + + AMDGPU_SMN_CROSS_AID * !!aid_id, + doorbell_ctrl); break; case 2: - WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3, doorbell_range); + WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3) + + 4 * aid_id, doorbell_range); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, @@ -124,10 +148,22 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x8); - WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL, doorbell_ctrl); + if (aid_id != 0) + WREG32_PCIE_EXT((SOC15_REG_OFFSET(NBIO, 0, + regS2A_DOORBELL_ENTRY_3_CTRL) + + S2A_DOORBELL_REG_LSD_OFFSET) * 4 + + AMDGPU_SMN_TARGET_AID(aid_id) + + AMDGPU_SMN_CROSS_AID * !!aid_id, + doorbell_ctrl); + else + WREG32(SOC15_REG_OFFSET(NBIO, 0, + regS2A_DOORBELL_ENTRY_5_CTRL) + + S2A_DOORBELL_REG_LSD_OFFSET, + doorbell_ctrl); break; case 3: - WREG32_SOC15(NBIO, 0, 
regDOORBELL0_CTRL_ENTRY_4, doorbell_range); + WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4) + + 4 * aid_id, doorbell_range); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, @@ -139,7 +175,18 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x9); - WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_6_CTRL, doorbell_ctrl); + if (aid_id != 0) + WREG32_PCIE_EXT((SOC15_REG_OFFSET(NBIO, 0, + regS2A_DOORBELL_ENTRY_4_CTRL) + + S2A_DOORBELL_REG_LSD_OFFSET) * 4 + + AMDGPU_SMN_TARGET_AID(aid_id) + + AMDGPU_SMN_CROSS_AID * !!aid_id, + doorbell_ctrl); + else + WREG32(SOC15_REG_OFFSET(NBIO, 0, + regS2A_DOORBELL_ENTRY_6_CTRL) + + S2A_DOORBELL_REG_LSD_OFFSET, + doorbell_ctrl); break; default: break; From fe1f05df5919c67c3add49efb55e251a8d78ee4e Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 31 May 2022 14:39:36 -0400 Subject: [PATCH 315/861] drm/amdkfd: Rework kfd_locked handling Currently, even if kfd_locked is set, a process is first created and then removed to work around a race condition in updating the kfd_locked flag. Rework kfd_locked handling to ensure no process is created if kfd_locked is set. This is achieved by updating kfd_locked under kfd_processes_mutex. With this, there is no need for kfd_locked to be an atomic counter. Instead, it can be a regular integer. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ------- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 21 ++++++++++++++++----- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 8 +++++++- 4 files changed, 25 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 45e8da125f70..8b9accecf49b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -146,13 +146,6 @@ static int kfd_open(struct inode *inode, struct file *filep) if (IS_ERR(process)) return PTR_ERR(process); - if (kfd_is_locked()) { - dev_dbg(kfd_device, "kfd is locked!\n" - "process %d unreferenced", process->pasid); - kfd_unref_process(process); - return -EAGAIN; - } - /* filep now owns the reference returned by kfd_create_process */ filep->private_data = process; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index df96c4c508a0..eb2b44fddf74 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -42,7 +42,7 @@ * once locked, kfd driver will stop any further GPU execution. * create process (open) will return -EAGAIN.
*/ -static atomic_t kfd_locked = ATOMIC_INIT(0); +static int kfd_locked; #ifdef CONFIG_DRM_AMDGPU_CIK extern const struct kfd2kgd_calls gfx_v7_kfd2kgd; #endif @@ -880,7 +880,9 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) return ret; } - atomic_dec(&kfd_locked); + mutex_lock(&kfd_processes_mutex); + --kfd_locked; + mutex_unlock(&kfd_processes_mutex); for (i = 0; i < kfd->num_nodes; i++) { node = kfd->nodes[i]; @@ -893,21 +895,27 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) bool kfd_is_locked(void) { - return (atomic_read(&kfd_locked) > 0); + lockdep_assert_held(&kfd_processes_mutex); + return (kfd_locked > 0); } void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { struct kfd_node *node; int i; + int count; if (!kfd->init_complete) return; /* for runtime suspend, skip locking kfd */ if (!run_pm) { + mutex_lock(&kfd_processes_mutex); + count = ++kfd_locked; + mutex_unlock(&kfd_processes_mutex); + /* For first KFD device suspend all the KFD processes */ - if (atomic_inc_return(&kfd_locked) == 1) + if (count == 1) kfd_suspend_all_processes(); } @@ -933,7 +941,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) /* for runtime resume, skip unlocking kfd */ if (!run_pm) { - count = atomic_dec_return(&kfd_locked); + mutex_lock(&kfd_processes_mutex); + count = --kfd_locked; + mutex_unlock(&kfd_processes_mutex); + WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); if (count == 0) ret = kfd_resume_all_processes(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 5cfebcc8b305..400b4dcbdf05 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -201,6 +201,8 @@ extern int amdgpu_no_queue_eviction_on_vm_fault; /* Enable eviction debug messages */ extern bool debug_evictions; +extern struct mutex kfd_processes_mutex; + enum cache_policy { cache_policy_coherent, cache_policy_noncoherent diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 9b1e84d33cdc..c3d43e6e5236 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -50,7 +50,7 @@ struct mm_struct; * Unique/indexed by mm_struct* */ DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); -static DEFINE_MUTEX(kfd_processes_mutex); +DEFINE_MUTEX(kfd_processes_mutex); DEFINE_SRCU(kfd_processes_srcu); @@ -818,6 +818,12 @@ struct kfd_process *kfd_create_process(struct file *filep) */ mutex_lock(&kfd_processes_mutex); + if (kfd_is_locked()) { + mutex_unlock(&kfd_processes_mutex); + pr_debug("KFD is locked! Cannot create process"); + return ERR_PTR(-EINVAL); + } + /* A prior open of /dev/kfd could have already created the process. */ process = find_process(thread, false); if (process) { From 0c7315e7d5ef9b36ca4db32ffeb34a187cbaf231 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 10 Jun 2022 09:41:29 -0400 Subject: [PATCH 316/861] drm/amdkfd: Add device repartition support GFX9.4.3 will support dynamic repartitioning of the GPU through sysfs. Add device repartitioning support in KFD to repartition the GPU from one mode to another.
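The switch sequence implemented below is: refuse if any KFD process exists (check-and-lock), tear KFD down, flip the partition mode, re-probe and re-init KFD, then unlock. A compile-standalone mock of that ordering; the function bodies here are stand-ins, and the names only loosely follow the real calls in the diff:

    #include <stdio.h>

    /* Stand-in bodies so the sketch compiles; the real driver uses
     * amdgpu_amdkfd_check_and_lock_kfd() and friends, per the hunks below.
     */
    static int  check_and_lock_kfd(void) { return 0; } /* nonzero if processes exist */
    static void kfd_fini(void)           { puts("kfd fini"); }
    static void switch_mode(int mode)    { printf("partition mode = %d\n", mode); }
    static void kfd_probe_init(void)     { puts("kfd probe + init"); }
    static void unlock_kfd(void)         { puts("kfd unlock"); }

    static int set_compute_partition(int mode)
    {
            if (check_and_lock_kfd())
                    return -1;      /* a KFD process is active: refuse the switch */
            kfd_fini();             /* KFD must be fully down across the switch */
            switch_mode(mode);
            kfd_probe_init();       /* bring KFD back on the new partitioning */
            unlock_kfd();
            return 0;
    }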
v2: squash in fix ("drm/amdkfd: Fix warning kgd2kfd_unlock_kfd defined but not used") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 22 +++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 5 +---- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 21 +++++++++++++++++++++ 5 files changed, 66 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 9d19c7ceda3f..bbbfe9ec4adf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -773,3 +773,13 @@ bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) else return false; } + +int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev) +{ + return kgd2kfd_check_and_lock_kfd(); +} + +void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev) +{ + kgd2kfd_unlock_kfd(); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index df07e212c21e..d1d643a050a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -151,6 +151,8 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev); +int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev); +void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev); int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, @@ -373,6 +375,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd); void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask); +int kgd2kfd_check_and_lock_kfd(void); +void kgd2kfd_unlock_kfd(void); #else static inline int kgd2kfd_init(void) { @@ -438,5 +442,14 @@ static inline void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { } + +static inline int kgd2kfd_check_and_lock_kfd(void) +{ + return 0; +} + +static inline void kgd2kfd_unlock_kfd(void) +{ +} #endif #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 069b259f384c..69bac5b801ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1233,10 +1233,30 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return -EINVAL; } + if (!adev->kfd.init_complete) + return -EPERM; + mutex_lock(&adev->gfx.partition_mutex); - ret = adev->gfx.funcs->switch_partition_mode(adev, mode); + if (mode == adev->gfx.funcs->query_partition_mode(adev)) + goto out; + ret = amdgpu_amdkfd_check_and_lock_kfd(adev); + if (ret) + goto out; + + amdgpu_amdkfd_device_fini_sw(adev); + + adev->gfx.funcs->switch_partition_mode(adev, mode); + + amdgpu_amdkfd_device_probe(adev); + amdgpu_amdkfd_device_init(adev); + /* If KFD init failed, return failure */ + if (!adev->kfd.init_complete) + ret = -EIO; + + amdgpu_amdkfd_unlock_kfd(adev); +out: mutex_unlock(&adev->gfx.partition_mutex); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c776fc5884de..47d8ac64e877 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -675,7 +675,7 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, static enum amdgpu_gfx_partition gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) { - enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + enum amdgpu_gfx_partition mode = adev->gfx.partition_mode; if (adev->nbio.funcs->get_compute_partition_mode) mode = adev->nbio.funcs->get_compute_partition_mode(adev); @@ -689,9 +689,6 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, u32 tmp = 0; int num_xcc_per_partition, i, num_xcc; - if (mode == adev->gfx.partition_mode) - return mode; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index eb2b44fddf74..293787290e36 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -1356,6 +1356,27 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) kfd_get_num_sdma_engines(node); } +int kgd2kfd_check_and_lock_kfd(void) +{ + mutex_lock(&kfd_processes_mutex); + if (!hash_empty(kfd_processes_table) || kfd_is_locked()) { + mutex_unlock(&kfd_processes_mutex); + return -EBUSY; + } + + ++kfd_locked; + mutex_unlock(&kfd_processes_mutex); + + return 0; +} + +void kgd2kfd_unlock_kfd(void) +{ + mutex_lock(&kfd_processes_mutex); + --kfd_locked; + mutex_unlock(&kfd_processes_mutex); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS From 5d30cbb4dbf096bb964fb9c5fd4417cad13445a4 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 29 Jun 2022 14:59:04 +0530 Subject: [PATCH 317/861] drm/amdgpu: Add map of logical to physical inst Add a map for logical to physical instances of an IP. For example, on some device configurations, the first logical XCC may not be the first physical XCC. Software may continue to access IP instances in logical order. The map provides a convenient way to get to the actual physical instance.
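A compile-standalone sketch of the lookup this map enables: dev_inst[block][logical] holds the physical instance, with an accessor in the style of the GET_INST() macro used in the next patch. The array shape follows the struct added below; the placeholder sizes and the identity fallback when no mapping function is installed are assumptions:

    #include <stdint.h>

    #define MAX_HWIP          32  /* placeholder sizes for the sketch */
    #define HWIP_MAX_INSTANCE 44

    struct ip_map_sketch {
            uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
            int8_t (*logical_to_dev_inst)(struct ip_map_sketch *map,
                                          int block, int8_t inst);
    };

    /* GET_INST()-style accessor: translate a logical instance to the
     * physical one when a map is installed, else assume they coincide
     * (assumed fallback).
     */
    static int8_t get_inst(struct ip_map_sketch *map, int block, int8_t inst)
    {
            return map->logical_to_dev_inst ?
                   map->logical_to_dev_inst(map, block, inst) : inst;
    }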
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 729d5eedae49..32112db5c841 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -669,6 +669,15 @@ enum amd_hw_ip_block_type { #define IP_VERSION_MIN(ver) (((ver) >> 8) & 0xFF) #define IP_VERSION_REV(ver) ((ver) & 0xFF) +struct amdgpu_ip_map_info { + /* Map of logical to actual dev instances */ + uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE]; + int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + int8_t inst); + +}; + struct amd_powerplay { void *pp_handle; const struct amd_pm_funcs *pp_funcs; @@ -968,6 +977,7 @@ struct amdgpu_device { /* soc15 register offset based on ip, instance and segment */ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; + struct amdgpu_ip_map_info ip_map; /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; From 659a4ab8e27eb39cc61cb74cc714ba1a8f8c9a61 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 5 Jul 2022 09:56:41 +0530 Subject: [PATCH 318/861] drm/amdgpu: Use instance lookup table for GC 9.4.3 Register accesses need to be based on physical instance on bare metal. Pass the right instance using logical to physical instance lookup table before accessing registers. Add a macro GET_INST to get the right physical instance of an IP corresponding to a logical instance. v2: fix gfx_v9_4_3_check_rlcg_range() (Alex) Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 20 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 381 +++++++++--------- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 146 +++---- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 3 + 4 files changed, 279 insertions(+), 271 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index e81bdca53f42..5daec0b45545 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -279,8 +279,8 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, /* HQD registers extend to CP_HQD_AQL_DISPATCH_ID_HI */ mqd_hqd = &m->cp_mqd_base_addr_lo; - hqd_base = SOC15_REG_OFFSET(GC, inst, regCP_MQD_BASE_ADDR); - hqd_end = SOC15_REG_OFFSET(GC, inst, regCP_HQD_AQL_DISPATCH_ID_HI); + hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_MQD_BASE_ADDR); + hqd_end = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_AQL_DISPATCH_ID_HI); for (reg = hqd_base; reg <= hqd_end; reg++) WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); @@ -289,7 +289,7 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, /* Activate doorbell logic before triggering WPTR poll. 
*/ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_DOORBELL_CONTROL), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL), data); if (wptr) { @@ -319,27 +319,27 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_LO), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO), lower_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI), upper_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR), lower_32_bits((uintptr_t)wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); - WREG32(SOC15_REG_OFFSET(GC, inst, regCP_PQ_WPTR_POLL_CNTL1), + WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1), (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_EOP_RPTR), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR), REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_ACTIVE), data); + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE), data); kgd_gfx_v9_release_queue(adev, inst); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 47d8ac64e877..e9c12b4970f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -190,7 +190,7 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 2; i < num_xcc; i++) - WREG32_SOC15(GC, i, regGRBM_MCM_ADDR, 0x4); + WREG32_SOC15(GC, GET_INST(GC, i), regGRBM_MCM_ADDR, 0x4); } static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, @@ -236,7 +236,7 @@ static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) /* scratch_reg0_offset is 32bit even with full XCD config */ uint32_t scratch_reg0_offset; - scratch_reg0_offset = SOC15_REG_OFFSET(GC, ring->xcc_id, regSCRATCH_REG0); + scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0); WREG32(scratch_reg0_offset, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); @@ -324,9 +324,9 @@ static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); - WREG32_SOC15(GC, 0, regRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, regRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); amdgpu_gfx_off_ctrl(adev, true); @@ -598,24 
+598,24 @@ static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - WREG32_SOC15_RLC_SHADOW_EX(reg, GC, xcc_id, regGRBM_GFX_INDEX, data); + WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data); } static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) { - WREG32_SOC15_RLC(GC, 0, regSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, GET_INST(GC, 0), regSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (address << SQ_IND_INDEX__INDEX__SHIFT) | (SQ_IND_INDEX__FORCE_READ_MASK)); - return RREG32_SOC15(GC, 0, regSQ_IND_DATA); + return RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_IND_DATA); } static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t regno, uint32_t num, uint32_t *out) { - WREG32_SOC15_RLC(GC, 0, regSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, GET_INST(GC, 0), regSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (regno << SQ_IND_INDEX__INDEX__SHIFT) | @@ -623,7 +623,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, (SQ_IND_INDEX__FORCE_READ_MASK) | (SQ_IND_INDEX__AUTO_INCR_MASK)); while (num--) - *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); + *(out++) = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_IND_DATA); } static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev, @@ -669,7 +669,7 @@ static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, u32 me, u32 pipe, u32 q, u32 vm) { - soc15_grbm_select(adev, me, pipe, q, vm, 0); + soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, 0)); } static enum amdgpu_gfx_partition @@ -719,7 +719,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, num_xcc_per_partition); tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, i % num_xcc_per_partition); - WREG32_SOC15(GC, i, regCP_HYP_XCP_CTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp); } if (adev->nbio.funcs->set_compute_partition_mode) @@ -755,7 +755,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_prim_fifo_size_backend = 0x100; adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; - gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); + gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG); break; default: BUG(); @@ -957,8 +957,8 @@ static u32 gfx_v9_4_3_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); - data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); + data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_RB_BACKEND_DISABLE); + data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_RB_BACKEND_DISABLE); data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; @@ -1014,21 +1014,21 @@ static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev, int xcc_id) mutex_lock(&adev->srbm_mutex); for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { - soc15_grbm_select(adev, 0, 0, 0, i, xcc_id); + soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id)); /* CP and shaders */ - WREG32_SOC15_RLC(GC, xcc_id, regSH_MEM_CONFIG, sh_mem_config); - WREG32_SOC15_RLC(GC, xcc_id, regSH_MEM_BASES, sh_mem_bases); + 
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases); } - soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); /* Initialize all compute VMIDs to have no GDS, GWS, or OA acccess. These should be enabled by FW for target VMIDs. */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { - WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_BASE, 2 * i, 0); - WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_SIZE, 2 * i, 0); - WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_GWS_VMID0, i, 0); - WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_OA_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0); } } @@ -1043,10 +1043,10 @@ static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) * access so that HWS firmware can save/restore entries. */ for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { - WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_BASE, 2 * vmid, 0); - WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_SIZE, 2 * vmid, 0); - WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_GWS_VMID0, vmid, 0); - WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_OA_VMID0, vmid, 0); + WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0); + WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0); } } @@ -1057,42 +1057,42 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - WREG32_FIELD15_PREREG(GC, i, GRBM_CNTL, READ_TIMEOUT, 0xff); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), GRBM_CNTL, READ_TIMEOUT, 0xff); gfx_v9_4_3_setup_rb(adev, i); } gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info); - adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, regDB_DEBUG2); + adev->gfx.config.db_debug2 = RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { for (j = 0; j < num_xcc; j++) { - soc15_grbm_select(adev, 0, 0, 0, i, j); + soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, j)); /* CP and shaders */ if (i == 0) { tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, SH_MEM_ALIGNMENT_MODE_UNALIGNED); tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, !!adev->gmc.noretry); - WREG32_SOC15_RLC(GC, j, regSH_MEM_CONFIG, tmp); - WREG32_SOC15_RLC(GC, j, regSH_MEM_BASES, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, j), regSH_MEM_CONFIG, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, j), regSH_MEM_BASES, 0); } else { tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, SH_MEM_ALIGNMENT_MODE_UNALIGNED); tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, !!adev->gmc.noretry); - WREG32_SOC15_RLC(GC, j, regSH_MEM_CONFIG, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, j), regSH_MEM_CONFIG, tmp); tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, (adev->gmc.private_aperture_start >> 48)); tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, (adev->gmc.shared_aperture_start >> 
48)); - WREG32_SOC15_RLC(GC, j, regSH_MEM_BASES, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, j), regSH_MEM_BASES, tmp); } } } - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0)); mutex_unlock(&adev->srbm_mutex); @@ -1105,18 +1105,18 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) static void gfx_v9_4_3_enable_save_restore_machine(struct amdgpu_device *adev, int xcc_id) { - WREG32_FIELD15_PREREG(GC, xcc_id, RLC_SRM_CNTL, SRM_ENABLE, 1); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1); } static void gfx_v9_4_3_init_csb(struct amdgpu_device *adev, int xcc_id) { adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ - WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_ADDR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI), adev->gfx.rlc.clear_state_gpu_addr >> 32); - WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_ADDR_LO), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO), adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); - WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_LENGTH), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_CSIB_LENGTH), adev->gfx.rlc.clear_state_size); } @@ -1137,7 +1137,7 @@ static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev, int xcc_id) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GDS | AMD_PG_SUPPORT_RLC_SMU_HS)) { - WREG32_SOC15(GC, 0, regRLC_JUMP_TABLE_RESTORE, + WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); } } @@ -1146,9 +1146,9 @@ static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; - data = RREG32_SOC15(GC, xcc_id, regCPC_PSP_DEBUG); + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG); data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK; - WREG32_SOC15(GC, xcc_id, regCPC_PSP_DEBUG, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data); } static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) @@ -1160,16 +1160,16 @@ static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) switch (num_xcc) { /* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */ case 1: - WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, 0x8); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, 0x8); break; case 2: tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID); tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP)); - WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, tmp); tmp = xcc_id << REG_FIELD_SHIFT(CP_PSP_XCP_CTL, PHYSICAL_XCC_ID); tmp = tmp | (xcc_id << REG_FIELD_SHIFT(CP_PSP_XCP_CTL, XCC_DIE_ID)); - WREG32_SOC15(GC, xcc_id, regCP_PSP_XCP_CTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_PSP_XCP_CTL, tmp); break; default: break; @@ -1181,7 +1181,7 @@ static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) uint32_t rlc_setting; /* if RLC is not enabled, do nothing */ - rlc_setting = RREG32_SOC15(GC, 0, regRLC_CNTL); + rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL); if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) return false; @@ -1195,11 +1195,11 @@ static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev, int xcc_id) data = RLC_SAFE_MODE__CMD_MASK; data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); - WREG32_SOC15(GC, xcc_id, regRLC_SAFE_MODE, data); + 
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data); /* wait for RLC_SAFE_MODE */ for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) + if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -1210,7 +1210,7 @@ static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) uint32_t data; data = RLC_SAFE_MODE__CMD_MASK; - WREG32_SOC15(GC, xcc_id, regRLC_SAFE_MODE, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data); } static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) @@ -1247,7 +1247,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); for (k = 0; k < adev->usec_timeout; k++) { - if (RREG32_SOC15(GC, 0, regRLC_SERDES_CU_MASTER_BUSY) == 0) + if (RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SERDES_CU_MASTER_BUSY) == 0) break; udelay(1); } @@ -1270,7 +1270,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; for (k = 0; k < adev->usec_timeout; k++) { - if ((RREG32_SOC15(GC, 0, regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) + if ((RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) break; udelay(1); } @@ -1283,13 +1283,13 @@ static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, /* These interrupts should be enabled to drive DS clock */ - tmp = RREG32_SOC15(GC, xcc_id, regCP_INT_CNTL_RING0); + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 
1 : 0); - WREG32_SOC15(GC, xcc_id, regCP_INT_CNTL_RING0, tmp); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp); } static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) @@ -1298,7 +1298,7 @@ static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 0); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); gfx_v9_4_3_wait_for_rlc_serdes(adev, i); } @@ -1310,9 +1310,9 @@ static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); - WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); udelay(50); } } @@ -1326,7 +1326,7 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 1); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), RLC_CNTL, RLC_ENABLE_F32, 1); udelay(50); /* carrizo do enable cp interrupt after cp inited */ @@ -1337,18 +1337,18 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) #ifdef AMDGPU_RLC_DEBUG_RETRY /* RLC_GPM_GENERAL_6 : RLC Ucode version */ - rlc_ucode_ver = RREG32_SOC15(GC, i, regRLC_GPM_GENERAL_6); + rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6); if (rlc_ucode_ver == 0x108) { dev_info(adev->dev, "Using rlc debug ucode. 
regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", rlc_ucode_ver, adev->gfx.rlc_fw_version); /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, * default is 0x9C4 to create a 100us interval */ - WREG32_SOC15(GC, i, regRLC_GPM_TIMER_INT_3, 0x9C4); + WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4); /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr * to disable the page fault retry interrupts, default is * 0x100 (256) */ - WREG32_SOC15(GC, i, regRLC_GPM_GENERAL_12, 0x100); + WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100); } #endif } @@ -1370,16 +1370,16 @@ static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev, int xcc_id) le32_to_cpu(hdr->header.ucode_array_offset_bytes)); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_ADDR, + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, RLCG_UCODE_LOADING_START_ADDRESS); for (i = 0; i < fw_size; i++) { if (amdgpu_emu_mode == 1 && i % 100 == 0) { dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i); msleep(1); } - WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); } - WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); return 0; } @@ -1393,7 +1393,7 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { /* disable CG */ - WREG32_SOC15(GC, i, regRLC_CGCG_CGLS_CTRL, 0); + WREG32_SOC15(GC, GET_INST(GC, i), regRLC_CGCG_CGLS_CTRL, 0); gfx_v9_4_3_init_pg(adev, i); @@ -1415,7 +1415,7 @@ static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, { u32 reg, data; - reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); + reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(reg); else @@ -1425,9 +1425,9 @@ static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; if (amdgpu_sriov_is_pp_one_vf(adev)) - WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); + WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data); else - WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); + WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data); } static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = { @@ -1439,7 +1439,7 @@ static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev, uint32_t offset, struct soc15_reg_rlcg *entries, int arr_size) { - int i; + int i, inst; uint32_t reg; if (!entries) @@ -1449,7 +1449,12 @@ static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev, const struct soc15_reg_rlcg *entry; entry = &entries[i]; - reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; + inst = adev->ip_map.logical_to_dev_inst ? 
+ adev->ip_map.logical_to_dev_inst( + adev, entry->hwip, entry->instance) : + entry->instance; + reg = adev->reg_offset[entry->hwip][inst][entry->segment] + + entry->reg; if (offset == reg) return true; } @@ -1468,9 +1473,9 @@ static void gfx_v9_4_3_cp_compute_enable(struct amdgpu_device *adev, bool enable, int xcc_id) { if (enable) { - WREG32_SOC15_RLC(GC, xcc_id, regCP_MEC_CNTL, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0); } else { - WREG32_SOC15_RLC(GC, xcc_id, regCP_MEC_CNTL, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); adev->gfx.kiq[xcc_id].ring.sched.ready = false; } @@ -1501,17 +1506,17 @@ static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev, tmp = 0; tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); - WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_CNTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp); - WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_LO, + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); - WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_HI, + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI, upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); mec_ucode_addr_offset = - SOC15_REG_OFFSET(GC, xcc_id, regCP_MEC_ME1_UCODE_ADDR); + SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR); mec_ucode_data_offset = - SOC15_REG_OFFSET(GC, xcc_id, regCP_MEC_ME1_UCODE_DATA); + SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA); /* MEC1 */ WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset); @@ -1532,12 +1537,12 @@ static void gfx_v9_4_3_kiq_setting(struct amdgpu_ring *ring, int xcc_id) struct amdgpu_device *adev = ring->adev; /* tell RLC which is KIQ queue */ - tmp = RREG32_SOC15(GC, xcc_id, regRLC_CP_SCHEDULERS); + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS); tmp &= 0xffffff00; tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); - WREG32_SOC15_RLC(GC, xcc_id, regRLC_CP_SCHEDULERS, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); tmp |= 0x80; - WREG32_SOC15_RLC(GC, xcc_id, regRLC_CP_SCHEDULERS, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); } static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) @@ -1580,14 +1585,14 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_EOP_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); mqd->cp_hqd_eop_control = tmp; /* enable doorbell? 
*/ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL); if (ring->use_doorbell) { tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, @@ -1617,7 +1622,7 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); /* set MQD vmid to 0 */ - tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_MQD_CONTROL); tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); mqd->cp_mqd_control = tmp; @@ -1627,7 +1632,7 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); /* set up the HQD, this is similar to CP_RB0_CNTL */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, (order_base_2(ring->ring_size / 4) - 1)); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, @@ -1654,23 +1659,23 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ ring->wptr = 0; - mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); + mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_RPTR); /* set the vmid for the queue */ mqd->cp_hqd_vmid = 0; - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PERSISTENT_STATE); tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); mqd->cp_hqd_persistent_state = tmp; /* set MIN_IB_AVAIL_SIZE */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_IB_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; /* set static priority for a queue/ring */ gfx_v9_4_3_mqd_set_priority(ring, mqd); - mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, regCP_HQD_QUANTUM); + mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_QUANTUM); /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. @@ -1690,94 +1695,94 @@ static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring, int xcc_id) /* disable wptr polling */ WREG32_FIELD15_PREREG(GC, xcc_id, CP_PQ_WPTR_POLL_CNTL, EN, 0); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_BASE_ADDR, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_BASE_ADDR_HI, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi); /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_CONTROL, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL, mqd->cp_hqd_eop_control); /* enable doorbell? 
*/ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); /* disable the queue if it's active */ - if (RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1) { - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, 1); + if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); for (j = 0; j < adev->usec_timeout; j++) { - if (!(RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1)) + if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) break; udelay(1); } - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi); } /* set the pointer to the MQD */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_BASE_ADDR, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo); - WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_BASE_ADDR_HI, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); /* set MQD vmid to 0 */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_CONTROL, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL, mqd->cp_mqd_control); /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_BASE, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_BASE_HI, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi); /* set up the HQD, this is similar to CP_RB0_CNTL */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_CONTROL, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control); /* set the wb address whether it's enabled or not */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR_REPORT_ADDR, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR, mqd->cp_hqd_pq_rptr_report_addr_lo); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, mqd->cp_hqd_pq_rptr_report_addr_hi); /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_POLL_ADDR, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI, mqd->cp_hqd_pq_wptr_poll_addr_hi); /* enable the doorbell if requested */ if (ring->use_doorbell) { - WREG32_SOC15(GC, xcc_id, regCP_MEC_DOORBELL_RANGE_LOWER, + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER, (adev->doorbell_index.kiq * 2) << 2); - WREG32_SOC15(GC, xcc_id, regCP_MEC_DOORBELL_RANGE_UPPER, + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER, (adev->doorbell_index.userqueue_end * 2) << 2); } - 
WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi); /* set the vmid for the queue */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_VMID, mqd->cp_hqd_vmid); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PERSISTENT_STATE, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state); /* activate the queue */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_ACTIVE, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, mqd->cp_hqd_active); if (ring->use_doorbell) @@ -1792,12 +1797,12 @@ static int gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring, int xcc_id) int j; /* disable the queue if it's active */ - if (RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1) { + if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, 1); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); for (j = 0; j < adev->usec_timeout; j++) { - if (!(RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1)) + if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) break; udelay(1); } @@ -1806,21 +1811,21 @@ static int gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring, int xcc_id) DRM_DEBUG("KIQ dequeue request failed.\n"); /* Manual disable if dequeue request times out */ - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_ACTIVE, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0); } - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0); } - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_IQ_TIMER, 0); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_IB_CONTROL, 0); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PERSISTENT_STATE, 0); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, 0); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR, 0); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI, 0); - WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0); return 0; } @@ -1848,19 +1853,19 @@ static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) ring->wptr = 0; amdgpu_ring_clear_ring(ring); mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); 
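The split visible here runs through the whole series: ring bookkeeping (ring->me, ring->pipe, ring->queue, kiq[xcc_id]) stays in logical terms, while GRBM selection and register accesses first translate the logical XCC index through the GET_INST() helper this patch adds to soc15_common.h, which falls back to the identity mapping when no logical-to-device table is registered. A minimal standalone sketch of that lookup (plain C; the interleaved two-AID table is invented for illustration and is not the driver's real ip_map):

#include <stdio.h>

/* Stand-in for adev->ip_map.logical_to_dev_inst; NULL means identity. */
typedef int (*logical_to_dev_inst_t)(int logical_inst);

static int example_gc_map(int logical_inst)
{
	/* Assumed layout: four logical XCCs interleaved across two AIDs. */
	static const int table[4] = { 0, 2, 1, 3 };
	return table[logical_inst & 3];
}

/* Mirrors GET_INST(): use the registered mapping when present,
 * otherwise treat the logical and physical instance as the same. */
static int get_inst(logical_to_dev_inst_t map, int logical_inst)
{
	return map ? map(logical_inst) : logical_inst;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("logical GC %d -> physical XCC %d\n", i,
		       get_inst(example_gc_map, i));
	printf("no map: logical 2 -> physical %d\n", get_inst(NULL, 2));
	return 0;
}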
gfx_v9_4_3_kiq_init_register(ring, xcc_id); - soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); } else { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); gfx_v9_4_3_mqd_init(ring); gfx_v9_4_3_kiq_init_register(ring, xcc_id); - soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.kiq[xcc_id].mqd_backup) @@ -1888,9 +1893,9 @@ static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); gfx_v9_4_3_mqd_init(ring); - soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) @@ -2049,9 +2054,9 @@ static int gfx_v9_4_3_hw_fini(void *handle) mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, adev->gfx.kiq[i].ring.me, adev->gfx.kiq[i].ring.pipe, - adev->gfx.kiq[i].ring.queue, 0, i); + adev->gfx.kiq[i].ring.queue, 0, GET_INST(GC, i)); gfx_v9_4_3_kiq_fini_register(&adev->gfx.kiq[i].ring, i); - soc15_grbm_select(adev, 0, 0, 0, 0, i); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, i)); mutex_unlock(&adev->srbm_mutex); } @@ -2085,7 +2090,7 @@ static bool gfx_v9_4_3_is_idle(void *handle) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - if (REG_GET_FIELD(RREG32_SOC15(GC, i, regGRBM_STATUS), + if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) return false; } @@ -2112,7 +2117,7 @@ static int gfx_v9_4_3_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* GRBM_STATUS */ - tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS); if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | @@ -2131,7 +2136,7 @@ static int gfx_v9_4_3_soft_reset(void *handle) } /* GRBM_STATUS2 */ - tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS2); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2); if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); @@ -2145,17 +2150,17 @@ static int gfx_v9_4_3_soft_reset(void *handle) gfx_v9_4_3_cp_compute_enable(adev, false, 0); if (grbm_soft_reset) { - tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); tmp |= grbm_soft_reset; dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); udelay(50); tmp &= 
~grbm_soft_reset; - WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); } /* Wait a little for things to settle down */ @@ -2174,22 +2179,22 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, /* GDS Base */ gfx_v9_4_3_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, + SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid, gds_base); /* GDS Size */ gfx_v9_4_3_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, + SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid, gds_size); /* GWS */ gfx_v9_4_3_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, + SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); /* OA */ gfx_v9_4_3_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, + SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid, (1 << (oa_size + oa_base)) - (1 << oa_base)); } @@ -2260,7 +2265,7 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { /* 1 - RLC_CGTT_MGCG_OVERRIDE */ - def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE); + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | @@ -2270,28 +2275,28 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; if (def != data) - WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); /* MGLS is a global flag to control all MGLS in GFX */ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { /* 2 - RLC memory Light sleep */ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { - def = data = RREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL); + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; if (def != data) - WREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); } /* 3 - CP memory Light sleep */ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { - def = data = RREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL); + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; if (def != data) - WREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); } } } else { /* 1 - MGCG_OVERRIDE */ - def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE); + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | @@ -2299,20 +2304,20 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); if (def != data) - WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), 
regRLC_CGTT_MGCG_OVERRIDE, data); /* 2 - disable MGLS in RLC */ - data = RREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL); + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; - WREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); } /* 3 - disable MGLS in CP */ - data = RREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL); + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - WREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); } } @@ -2327,7 +2332,7 @@ static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *ad amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { - def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE); + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); /* unset CGCG override */ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) @@ -2336,10 +2341,10 @@ static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *ad data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; /* update CGCG and CGLS override bits */ if (def != data) - WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); /* enable cgcg FSM(0x0000363F) */ - def = RREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL); + def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); if (adev->asic_type == CHIP_ARCTURUS) data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | @@ -2351,21 +2356,21 @@ static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *ad data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; if (def != data) - WREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); /* set IDLE_POLL_COUNT(0x00900100) */ - def = RREG32_SOC15(GC, xcc_id, regCP_RB_WPTR_POLL_CNTL); + def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL); data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); if (def != data) - WREG32_SOC15(GC, xcc_id, regCP_RB_WPTR_POLL_CNTL, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data); } else { - def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL); + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); /* reset CGCG/CGLS bits */ data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); /* disable cgcg and cgls in FSM */ if (def != data) - WREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); } amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); @@ -2444,12 +2449,12 @@ static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags) *flags = 0; /* AMD_CG_SUPPORT_GFX_MGCG */ - data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_CGTT_MGCG_OVERRIDE)); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE)); if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 
*flags |= AMD_CG_SUPPORT_GFX_MGCG; /* AMD_CG_SUPPORT_GFX_CGCG */ - data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_CGCG_CGLS_CTRL)); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL)); if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CGCG; @@ -2458,12 +2463,12 @@ static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_GFX_CGLS; /* AMD_CG_SUPPORT_GFX_RLC_LS */ - data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_MEM_SLP_CNTL)); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL)); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; /* AMD_CG_SUPPORT_GFX_CP_LS */ - data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regCP_MEM_SLP_CNTL)); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL)); if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; } @@ -2634,7 +2639,7 @@ static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); - amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); + amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ } @@ -2710,16 +2715,16 @@ static void gfx_v9_4_3_set_compute_eop_interrupt_state(struct amdgpu_device *ade if (me == 1) { switch (pipe) { case 0: - mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE0_INT_CNTL); + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL); break; case 1: - mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE1_INT_CNTL); + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL); break; case 2: - mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE2_INT_CNTL); + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL); break; case 3: - mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE3_INT_CNTL); + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL); break; default: DRM_DEBUG("invalid pipe %d\n", pipe); @@ -2760,7 +2765,7 @@ static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: for (i = 0; i < num_xcc; i++) - WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, + WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); break; @@ -2783,7 +2788,7 @@ static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: for (i = 0; i < num_xcc; i++) - WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, + WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); break; @@ -2943,16 +2948,16 @@ static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring, switch (pipe) { case 0: - wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS0); + wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0); break; case 1: - wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS1); + wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1); break; case 2: - wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS2); + wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2); break; case 3: - wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS3); + wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3); break; default: DRM_DEBUG("invalid pipe %d\n", pipe); @@ -2974,7 +2979,7 @@ static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable) */ val = enable ? 0x1f : 0x07ffffff; amdgpu_ring_emit_wreg(ring, - SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_GFX), + SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX), val); /* Restrict waves for normal/low priority compute queues as well @@ -3161,15 +3166,15 @@ static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); + WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data); } static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); - data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); + data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG); + data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG); data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index e35365ab3f1f..c26ac0662c7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -35,7 +35,7 @@ static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev) { - return (u64)RREG32_SOC15(GC, 0, regMC_VM_FB_OFFSET) << 24; + return (u64)RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_FB_OFFSET) << 24; } static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, @@ -48,12 +48,12 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; - WREG32_SOC15_OFFSET(GC, i, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15_OFFSET(GC, i, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); @@ -79,31 +79,31 @@ static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { if (adev->gmc.pdb0_bo) { - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.fb_start >> 12)); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, 
(u32)(adev->gmc.fb_start >> 44)); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, (u32)(adev->gmc.gart_end >> 44)); } else { - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, (u32)(adev->gmc.gart_start >> 44)); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, (u32)(adev->gmc.gart_end >> 44)); } @@ -119,13 +119,13 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { /* Program the AGP BAR */ - WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BASE, 0); - WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); - WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BASE, 0); + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { /* Program the system aperture low logical page number. */ - WREG32_SOC15_RLC(GC, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); if (adev->apu_flags & AMD_APU_IS_RAVEN2) @@ -136,44 +136,44 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) * aperture high address (add 1) to get rid of the VM * fault and hardware hang. */ - WREG32_SOC15_RLC(GC, i, + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18)); else - WREG32_SOC15_RLC(GC, i, + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); - WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); - WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, (u32)(value >> 44)); /* Program "protection fault". 
*/ - WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, (u32)(adev->dummy_page_addr >> 12)); - WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, (u32)((u64)adev->dummy_page_addr >> 44)); - tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL2); + tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); - WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2, tmp); } /* In the case squeezing vram into GART aperture, we don't use * FB aperture and AGP aperture. Disable them. */ if (adev->gmc.pdb0_bo) { - WREG32_SOC15(GC, i, regMC_VM_FB_LOCATION_TOP, 0); - WREG32_SOC15(GC, i, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); - WREG32_SOC15(GC, i, regMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, i, regMC_VM_AGP_BOT, 0xFFFFFF); - WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); - WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0); + WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); + WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0); + WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, 0xFFFFFF); + WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); + WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); } } } @@ -186,7 +186,7 @@ static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { /* Setup TLB control */ - tmp = RREG32_SOC15(GC, i, regMC_VM_MX_L1_TLB_CNTL); + tmp = RREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); @@ -200,7 +200,7 @@ static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); - WREG32_SOC15_RLC(GC, i, regMC_VM_MX_L1_TLB_CNTL, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL, tmp); } } @@ -212,7 +212,7 @@ static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { /* Setup L2 cache */ - tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL); + tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); /* XXX for emulation, Refer to closed source code.*/ @@ -221,12 +221,12 @@ static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); - WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL, tmp); - tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL2); + tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL2); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); - WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL2, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL2, tmp); tmp = regVM_L2_CNTL3_DEFAULT; if (adev->gmc.translate_further) { @@ -238,7 +238,7 @@ static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); } - WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL3, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL3, tmp); tmp = regVM_L2_CNTL4_DEFAULT; if (adev->gmc.xgmi.connected_to_cpu) { @@ -248,7 +248,7 @@ static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); } - WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL4, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL4, tmp); } } @@ -259,7 +259,7 @@ static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - tmp = RREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL); + tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, adev->gmc.vmid0_page_table_depth); @@ -267,7 +267,7 @@ static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) adev->gmc.vmid0_page_table_block_size); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); - WREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL, tmp); } } @@ -277,23 +277,23 @@ static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0XFFFFFFFF); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, 0x0000000F); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), 
regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); - WREG32_SOC15(GC, i, + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); } } @@ -316,7 +316,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0; i <= 14; i++) { - tmp = RREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i); + tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, i); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, num_level); @@ -348,19 +348,19 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) !adev->gmc.noretry || adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)); - WREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, i * hub->ctx_distance, tmp); - WREG32_SOC15_OFFSET(GC, j, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i * hub->ctx_addr_distance, 0); - WREG32_SOC15_OFFSET(GC, j, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i * hub->ctx_addr_distance, 0); - WREG32_SOC15_OFFSET(GC, j, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i * hub->ctx_addr_distance, lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32_SOC15_OFFSET(GC, j, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i * hub->ctx_addr_distance, upper_32_bits(adev->vm_manager.max_pfn - 1)); @@ -378,9 +378,9 @@ static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0 ; i < 18; ++i) { - WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, i * hub->eng_addr_distance, 0xffffffff); - WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, i * hub->eng_addr_distance, 0x1f); } } @@ -398,9 +398,9 @@ static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) * VF copy registers so vbios post doesn't program them, for * SRIOV driver need to program them */ - WREG32_SOC15_RLC(GC, i, regMC_VM_FB_LOCATION_BASE, + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, adev->gmc.vram_start >> 24); - WREG32_SOC15_RLC(GC, i, regMC_VM_FB_LOCATION_TOP, + WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, adev->gmc.vram_end >> 24); } } @@ -432,23 +432,23 @@ static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; /* Disable all tables */ for (i = 0; i < 16; i++) - WREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT0_CNTL, + WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT0_CNTL, i * hub->ctx_distance, 0); /* Setup TLB control */ - tmp = RREG32_SOC15(GC, j, regMC_VM_MX_L1_TLB_CNTL); + tmp = RREG32_SOC15(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); - WREG32_SOC15_RLC(GC, j, regMC_VM_MX_L1_TLB_CNTL, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp); /* Setup L2 cache */ - 
tmp = RREG32_SOC15(GC, j, regVM_L2_CNTL); + tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); - WREG32_SOC15(GC, j, regVM_L2_CNTL, tmp); - WREG32_SOC15(GC, j, regVM_L2_CNTL3, 0); + WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0); } } @@ -466,7 +466,7 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL); + tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, @@ -497,7 +497,7 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, CRASH_ON_RETRY_FAULT, 1); } - WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL, tmp); } } @@ -511,24 +511,24 @@ static void gfxhub_v1_2_init(struct amdgpu_device *adev) hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; hub->ctx0_ptb_addr_lo32 = - SOC15_REG_OFFSET(GC, i, + SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); hub->ctx0_ptb_addr_hi32 = - SOC15_REG_OFFSET(GC, i, + SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); hub->vm_inv_eng0_sem = - SOC15_REG_OFFSET(GC, i, regVM_INVALIDATE_ENG0_SEM); + SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = - SOC15_REG_OFFSET(GC, i, regVM_INVALIDATE_ENG0_REQ); + SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = - SOC15_REG_OFFSET(GC, i, regVM_INVALIDATE_ENG0_ACK); + SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_ACK); hub->vm_context0_cntl = - SOC15_REG_OFFSET(GC, i, regVM_CONTEXT0_CNTL); + SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL); hub->vm_l2_pro_fault_status = - SOC15_REG_OFFSET(GC, i, + SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_STATUS); hub->vm_l2_pro_fault_cntl = - SOC15_REG_OFFSET(GC, i, regVM_L2_PROTECTION_FAULT_CNTL); + SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL); hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; @@ -551,9 +551,9 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev) u32 max_region; u64 seg_size; - xgmi_lfb_cntl = RREG32_SOC15(GC, 0, regMC_VM_XGMI_LFB_CNTL); + xgmi_lfb_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_CNTL); seg_size = REG_GET_FIELD( - RREG32_SOC15(GC, 0, regMC_VM_XGMI_LFB_SIZE), + RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_SIZE), MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; max_region = REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index eb35096756b8..39e4406da4ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -24,6 +24,9 @@ #ifndef __SOC15_COMMON_H__ #define __SOC15_COMMON_H__ +/* GET_INST returns the physical instance corresponding to a logical instance */ +#define GET_INST(ip, inst) (adev->ip_map.logical_to_dev_inst? 
adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst): inst) + /* Register Access Macros */ #define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) #define SOC15_REG_OFFSET1(ip, inst, reg, offset) \ From 4db6f200a5fef12666ab66e54bacb65c5d675e9d Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 29 Jun 2022 16:26:49 +0530 Subject: [PATCH 319/861] drm/amdgpu: Add mask for SDMA instances Add a mask of SDMA instances available for use. On certain ASIC configs, not all SDMA instances are available for software use. v2: Change sdma mask type to uint32_t (Le) Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 632b77138fe4..62afb282a3ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -48,6 +48,8 @@ enum amdgpu_sdma_irq { AMDGPU_SDMA_IRQ_LAST }; +#define NUM_SDMA(x) hweight32(x) + struct amdgpu_sdma_instance { /* SDMA firmware */ const struct firmware *fw; @@ -75,6 +77,7 @@ struct amdgpu_sdma { struct amdgpu_irq_src srbm_write_irq; int num_instances; + uint32_t sdma_mask; int num_inst_per_aid; uint32_t srbm_soft_reset; bool has_page_queue; From f8b34a0518701bae8bb02fabe129f01b1dc33336 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 29 Jun 2022 21:04:39 +0530 Subject: [PATCH 320/861] drm/amdgpu: Use instance table for sdma 4.4.2 For ASICs with sdma IP v4.4.2, add mapping for logical to physical instances. v2: Register accesses on bare metal should be based on physical instance. Use GET_INST() to get physical instance. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 7 ++-- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 7 +++- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 38 ++++++++++++++----- 3 files changed, 38 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 5daec0b45545..772774615cb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -43,9 +43,10 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, engine_id, - regSDMA_RLC0_RB_CNTL) - - regSDMA_RLC0_RB_CNTL; + uint32_t sdma_engine_reg_base = + SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, engine_id), + regSDMA_RLC0_RB_CNTL) - + regSDMA_RLC0_RB_CNTL; uint32_t retval = sdma_engine_reg_base + queue_id * (regSDMA_RLC1_RB_CNTL - regSDMA_RLC0_RB_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 5a5bab665f60..e6c1004e4e76 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -75,7 +75,10 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan bool use_doorbell, int doorbell_index, int doorbell_size) { u32 doorbell_range = 0, doorbell_ctrl = 0; - int aid_id = adev->sdma.instance[instance].aid_id; + int aid_id, dev_inst; + + dev_inst = GET_INST(SDMA0, instance); + aid_id = adev->sdma.instance[instance].aid_id; if (use_doorbell == false) return;
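SDMA follows the same recipe: patch 319's sdma_mask records which physical instances exist (NUM_SDMA() is just hweight32() of it), and this patch routes register address computation through GET_INST(SDMA0, ...) while ring and IRQ bookkeeping stay logical. One plausible way such a logical-to-physical mapping could be derived from an availability mask is sketched below (standalone C with GCC/Clang's __builtin_popcount standing in for hweight32(); the real driver builds its tables during IP discovery, and the mask value here is made up):

#include <stdio.h>
#include <stdint.h>

/* Counterpart of NUM_SDMA(x)/hweight32(x): population count of the mask. */
static int num_sdma(uint32_t mask)
{
	return __builtin_popcount(mask);
}

/* Map logical instance i to the i-th set bit of the availability mask. */
static int sdma_logical_to_dev_inst(uint32_t mask, int logical)
{
	for (int phys = 0; phys < 32; phys++) {
		if (!(mask & (1u << phys)))
			continue;
		if (logical-- == 0)
			return phys;
	}
	return -1; /* fewer usable instances than requested */
}

int main(void)
{
	uint32_t sdma_mask = 0x33; /* physical instances 0, 1, 4, 5 usable */

	printf("num_instances = %d\n", num_sdma(sdma_mask));
	for (int i = 0; i < num_sdma(sdma_mask); i++)
		printf("logical SDMA %d -> physical %d\n", i,
		       sdma_logical_to_dev_inst(sdma_mask, i));
	return 0;
}

@@ -93,7 +96,7 @@ static void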
nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_RANGE_SIZE, doorbell_size); - switch (instance % adev->sdma.num_inst_per_aid) { + switch (dev_inst % adev->sdma.num_inst_per_aid) { case 0: WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1) + 4 * aid_id, doorbell_range); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 9b53174925f8..4350939105c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -57,7 +57,9 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev); static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 offset) { - return (adev->reg_offset[SDMA0_HWIP][instance][0] + offset); + u32 dev_inst = GET_INST(SDMA0, instance); + + return (adev->reg_offset[SDMA0_HWIP][dev_inst][0] + offset); } static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num) @@ -1475,16 +1477,31 @@ static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t instance; + uint32_t instance, i; DRM_DEBUG("IH: SDMA trap\n"); instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); - instance += node_id_to_phys_map[entry->node_id] * - adev->sdma.num_inst_per_aid; + + /* Client id gives the SDMA instance in AID. To know the exact SDMA + * instance, interrupt entry gives the node id which corresponds to the AID instance. + * Match node id with the AID id associated with the SDMA instance. */ + for (i = instance; i < adev->sdma.num_instances; + i += adev->sdma.num_inst_per_aid) { + if (adev->sdma.instance[i].aid_id == + node_id_to_phys_map[entry->node_id]) + break; + } + + if (i >= adev->sdma.num_instances) { + dev_WARN_ONCE( + adev->dev, 1, + "Couldn't find the right sdma instance in trap handler"); + return 0; + } switch (entry->ring_id) { case 0: - amdgpu_fence_process(&adev->sdma.instance[instance].ring); + amdgpu_fence_process(&adev->sdma.instance[i].ring); break; default: break; @@ -1717,12 +1734,12 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags) *flags = 0; /* AMD_CG_SUPPORT_SDMA_MGCG */ - data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_CLK_CTRL)); + data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_CLK_CTRL)); if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK)) *flags |= AMD_CG_SUPPORT_SDMA_MGCG; /* AMD_CG_SUPPORT_SDMA_LS */ - data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_POWER_CNTL)); + data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_POWER_CNTL)); if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK) *flags |= AMD_CG_SUPPORT_SDMA_LS; } @@ -1809,7 +1826,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev) { - int i; + int i, dev_inst; for (i = 0; i < adev->sdma.num_instances; i++) { adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs; @@ -1820,7 +1837,10 @@ static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev) adev->sdma.instance[i].page.me = i; } - adev->sdma.instance[i].aid_id = i / adev->sdma.num_inst_per_aid; + dev_inst = GET_INST(SDMA0, i); + /* AID to which SDMA belongs depends on physical instance */ + adev->sdma.instance[i].aid_id = + dev_inst / adev->sdma.num_inst_per_aid; } } From 7aa8a266aaa25e9e2f85d9d2d594cdff6b5635f2 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 6 Jul 2022 13:50:45 +0530 Subject: 
[PATCH 321/861] drm/amdgpu: Fix GRBM programming sequence

GRBM MCM address programming needs to be done only for XCC instances
outside AID0. Use the physical instance to determine which XCC
instances are outside AID0.

Signed-off-by: Lijo Lazar
Reviewed-by: Le Ma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index e9c12b4970f9..43126f7b70ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -186,11 +186,14 @@ static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)

 static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
 {
-	int i, num_xcc;
+	int i, num_xcc, dev_inst;

 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
-	for (i = 2; i < num_xcc; i++)
-		WREG32_SOC15(GC, GET_INST(GC, i), regGRBM_MCM_ADDR, 0x4);
+	for (i = 0; i < num_xcc; i++) {
+		dev_inst = GET_INST(GC, i);
+		if (dev_inst >= 2)
+			WREG32_SOC15(GC, dev_inst, regGRBM_MCM_ADDR, 0x4);
+	}
 }

 static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,

From 2e10ced47ff261d9dba439c5bcecf68d0c1b7de5 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Sat, 2 Jul 2022 19:39:34 -0400
Subject: [PATCH 322/861] drm/amdgpu/nbio: add vcn doorbell multiple AIDs
 support

Update vcn doorbell range to support multiple AIDs.

Signed-off-by: James Zhu
Acked-by: Leo Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h |  1 +
 drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c   | 20 +++++++++++++++++---
 2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 70a5f030d5f7..22483dc66351 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -42,6 +42,7 @@
 #define regUVD_JRBC_STATUS_INTERNAL_OFFSET	0x4089
 #define regUVD_JPEG_PITCH_INTERNAL_OFFSET	0x4043
 #define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET	0x4094
+#define regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET	0x1bffe

 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR	0x18000

diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index e6c1004e4e76..bcd3e08d01cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -202,6 +202,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
 				      int doorbell_index, int instance)
 {
 	u32 doorbell_range = 0, doorbell_ctrl = 0;
+	u32 aid_id = instance;

 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range,
@@ -212,6 +213,11 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
 				DOORBELL0_CTRL_ENTRY_0,
 				BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0x9);
+		if (aid_id)
+			doorbell_range = REG_SET_FIELD(doorbell_range,
+					DOORBELL0_CTRL_ENTRY_0,
+					DOORBELL0_FENCE_ENABLE_ENTRY,
+					0x4);

 		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
 				S2A_DOORBELL_ENTRY_1_CTRL,
@@ -228,6 +234,13 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
 		doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
 				S2A_DOORBELL_ENTRY_1_CTRL,
 				S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
+
+		WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17) +
+			aid_id, doorbell_range);
+		WREG32_PCIE_EXT(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL) * 4 +
+			AMDGPU_SMN_TARGET_AID(aid_id) +
+			AMDGPU_SMN_CROSS_AID * !!aid_id,
+			doorbell_ctrl);
 	} else {
 		doorbell_range =
REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, @@ -235,10 +248,11 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_RANGE_SIZE, 0); - } - WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, doorbell_range); - WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL, doorbell_ctrl); + WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, doorbell_range); + WREG32(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL), + doorbell_ctrl); + } } static void nbio_v7_9_enable_doorbell_aperture(struct amdgpu_device *adev, From d4ad24a0b796ad429403bf17ba97ee7e2470ad68 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 2 Jul 2022 19:53:36 -0400 Subject: [PATCH 323/861] drm/amdgpu/jpeg: add JPEG multiple AIDs support Add JPEG multiple AIDs support. Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 354 ++++++++++++++--------- 1 file changed, 214 insertions(+), 140 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index e12e3646c49a..aa14a6619e9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -63,6 +63,8 @@ static int jpeg_v4_0_3_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS; + jpeg_v4_0_3_set_dec_ring_funcs(adev); jpeg_v4_0_3_set_irq_funcs(adev); @@ -80,12 +82,12 @@ static int jpeg_v4_0_3_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, r; + int i, j, r; - for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { /* JPEG TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - amdgpu_ih_srcid_jpeg[i], &adev->jpeg.inst->irq); + amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq); if (r) return r; } @@ -98,22 +100,27 @@ static int jpeg_v4_0_3_sw_init(void *handle) if (r) return r; - for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { - ring = &adev->jpeg.inst->ring_dec[i]; - ring->use_doorbell = true; - ring->vm_hub = AMDGPU_MMHUB0(0); - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; - sprintf(ring->name, "jpeg_dec_%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); - if (r) - return r; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + ring->use_doorbell = true; + ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id); + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + j + 9 * i; + sprintf(ring->name, "jpeg_dec_%d.%d", i, j); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; - adev->jpeg.internal.jpeg_pitch[i] = - regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET; - adev->jpeg.inst->external.jpeg_pitch[i] = - SOC15_REG_OFFSET1(JPEG, 0, regUVD_JRBC0_UVD_JRBC_SCRATCH0, - (i?(0x40 * i - 0xc80):0)); + adev->jpeg.internal.jpeg_pitch[j] = + regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET; + adev->jpeg.inst[i].external.jpeg_pitch[j] = + SOC15_REG_OFFSET1(JPEG, i, regUVD_JRBC0_UVD_JRBC_SCRATCH0, + (j?(0x40 * j - 0xc80):0)); + } } return 0; @@ -149,22 +156,30 @@ static int 
jpeg_v4_0_3_sw_fini(void *handle) static int jpeg_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; - int i, r; + struct amdgpu_ring *ring; + int i, j, r; - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + ring = adev->jpeg.inst[i].ring_dec; - for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { - ring = &adev->jpeg.inst->ring_dec[i]; if (ring->use_doorbell) - WREG32_SOC15_OFFSET(VCN, 0, regVCN_JPEG_DB_CTRL, - (ring->pipe?(ring->pipe - 0x15):0), - ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | - VCN_JPEG_DB_CTRL__EN_MASK); - r = amdgpu_ring_test_helper(ring); - if (r) - return r; + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i, + adev->jpeg.inst[i].aid_id); + + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + if (ring->use_doorbell) + WREG32_SOC15_OFFSET(VCN, i, regVCN_JPEG_DB_CTRL, + (ring->pipe?(ring->pipe - 0x15):0), + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } } DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n"); @@ -233,48 +248,52 @@ static int jpeg_v4_0_3_resume(void *handle) return r; } -static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) +static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx) { uint32_t data; int i; - data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); - if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) { data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - else + data &= (~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1)); + } else { data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + } data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); + WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL, data); - data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); + data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE); data &= ~(JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK << i); - WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); + WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE, data); } -static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) +static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx) { uint32_t data; int i; - data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); - if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) { data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - else + data |= (JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1); + } else { data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + } data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); + WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL, data); - data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); + data = RREG32_SOC15(JPEG, inst_idx, 
regJPEG_CGC_GATE); data |= (JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK << i); - WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); + WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE, data); } /** @@ -286,58 +305,63 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) */ static int jpeg_v4_0_3_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; - int i; + struct amdgpu_ring *ring; + int i, j; - WREG32_SOC15(JPEG, 0, regUVD_PGFSM_CONFIG, - 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); - SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_PGFSM_STATUS, - UVD_PGFSM_STATUS__UVDJ_PWR_ON << - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + WREG32_SOC15(JPEG, i, regUVD_PGFSM_CONFIG, + 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(JPEG, i, regUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDJ_PWR_ON << + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); - /* disable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - /* JPEG disable CGC */ - jpeg_v4_0_3_disable_clock_gating(adev); + /* JPEG disable CGC */ + jpeg_v4_0_3_disable_clock_gating(adev, i); - /* MJPEG global tiling registers */ - WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); + /* MJPEG global tiling registers */ + WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); - /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); - for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { - unsigned int reg_offset = (i?(0x40 * i - 0xc80):0); + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); - ring = &adev->jpeg.inst->ring_dec[i]; + ring = &adev->jpeg.inst[i].ring_dec[j]; - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC0_MASK << i, - ~(JPEG_SYS_INT_EN__DJRBC0_MASK << i)); + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC0_MASK << j, + ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j)); - WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, reg_offset, 0); - WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, - (0x00000001L | 0x00000002L)); - WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, - reg_offset, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - reg_offset, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR, reg_offset, 0); - WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR, reg_offset, 0); - 
WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, - 0x00000002L); - WREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_SIZE, reg_offset, - ring->ring_size / 4); - ring->wptr = RREG32_SOC15_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR, - reg_offset); + WREG32_SOC15_OFFSET(JPEG, i, + regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, + (0x00000001L | 0x00000002L)); + WREG32_SOC15_OFFSET(JPEG, i, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, + reg_offset, lower_32_bits(ring->gpu_addr)); + WREG32_SOC15_OFFSET(JPEG, i, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + reg_offset, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_RPTR, reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_WPTR, reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, + 0x00000002L); + WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_SIZE, reg_offset, + ring->ring_size / 4); + ring->wptr = RREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_WPTR, + reg_offset); + } } return 0; @@ -352,24 +376,31 @@ static int jpeg_v4_0_3_start(struct amdgpu_device *adev) */ static int jpeg_v4_0_3_stop(struct amdgpu_device *adev) { - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); + int i; - jpeg_v4_0_3_enable_clock_gating(adev); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; - /* enable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), - UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); - WREG32_SOC15(JPEG, 0, regUVD_PGFSM_CONFIG, - 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); - SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_PGFSM_STATUS, - UVD_PGFSM_STATUS__UVDJ_PWR_OFF << - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + jpeg_v4_0_3_enable_clock_gating(adev, i); + + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + WREG32_SOC15(JPEG, i, regUVD_PGFSM_CONFIG, + 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(JPEG, i, regUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDJ_PWR_OFF << + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + } return 0; } @@ -502,10 +533,28 @@ static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); amdgpu_ring_write(ring, 0); + if (ring->adev->jpeg.inst[ring->me].aid_id) { + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET, + 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x4); + } else { + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + } + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x3fbc); + if (ring->adev->jpeg.inst[ring->me].aid_id) { + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET, + 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0)); + 
amdgpu_ring_write(ring, 0x0); + } else { + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + } + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x1); @@ -651,15 +700,19 @@ static bool jpeg_v4_0_3_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool ret; - int i; + int i, j; - for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { - unsigned int reg_offset = (i?(0x40 * i - 0xc80):0); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); - ret &= ((RREG32_SOC15_OFFSET(JPEG, 0, - regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset) & - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + ret &= ((RREG32_SOC15_OFFSET(JPEG, i, + regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset) & + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + } } return ret; @@ -669,17 +722,20 @@ static int jpeg_v4_0_3_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret; - int i; + int i, j; - for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { - unsigned int reg_offset = (i?(0x40 * i - 0xc80):0); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); - ret &= SOC15_WAIT_ON_RREG_OFFSET(JPEG, 0, - regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset, - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK, - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + ret &= SOC15_WAIT_ON_RREG_OFFSET(JPEG, i, + regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset, + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + } } - return ret; } @@ -688,15 +744,19 @@ static int jpeg_v4_0_3_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + int i; - if (enable) { - if (!jpeg_v4_0_3_is_idle(handle)) - return -EBUSY; - jpeg_v4_0_3_enable_clock_gating(adev); - } else { - jpeg_v4_0_3_disable_clock_gating(adev); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + if (enable) { + if (!jpeg_v4_0_3_is_idle(handle)) + return -EBUSY; + jpeg_v4_0_3_enable_clock_gating(adev, i); + } else { + jpeg_v4_0_3_disable_clock_gating(adev, i); + } } - return 0; } @@ -732,32 +792,35 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t i; + + i = node_id_to_phys_map[entry->node_id]; DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n"); switch (entry->src_id) { case VCN_4_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec[0]); + amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[0]); break; case VCN_4_0__SRCID__JPEG1_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec[1]); + amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[1]); break; case VCN_4_0__SRCID__JPEG2_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec[2]); + amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[2]); break; case VCN_4_0__SRCID__JPEG3_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec[3]); + amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[3]); break; case VCN_4_0__SRCID__JPEG4_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec[4]); + amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[4]); break; case VCN_4_0__SRCID__JPEG5_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec[5]); + amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[5]); break; case VCN_4_0__SRCID__JPEG6_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec[6]); + amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[6]); break; case VCN_4_0__SRCID__JPEG7_DECODE: - amdgpu_fence_process(&adev->jpeg.inst->ring_dec[7]); + amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[7]); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", @@ -798,7 +861,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */ - 18 + 18 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ + 22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ 8 + 16, .emit_ib_size = 22, /* jpeg_v4_0_3_dec_ring_emit_ib */ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib, @@ -819,12 +882,17 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) { - int i; + int i, j; - for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) { - adev->jpeg.inst->ring_dec[i].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; - adev->jpeg.inst->ring_dec[i].me = 0; - adev->jpeg.inst->ring_dec[i].pipe = i; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; + adev->jpeg.inst[i].ring_dec[j].me = i; + adev->jpeg.inst[i].ring_dec[j].pipe = j; + } + adev->jpeg.inst[i].aid_id = i / adev->jpeg.num_inst_per_aid; } DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); } @@ -836,7 +904,13 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = { static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->irq.num_types = 
adev->jpeg.num_jpeg_rings; + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings; + } adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs; } From 7229bd6fe02865a9fc324b4f062268f53190b5f4 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 11 Jul 2022 11:05:05 -0400 Subject: [PATCH 324/861] drm/amdgpu/vcn: update clock gate setting for VCN 4.0.3 Update clock gate setting. Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 30 +++++++++++++------------ 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 0b2b97593bac..a9f06f3b00eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -424,13 +424,14 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indir static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; + int inst_idx = 0; + + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + return; /* VCN disable CGC */ - data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL); - if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) - data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - else - data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data); @@ -517,11 +518,11 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, { uint32_t reg_data = 0; - /* enable sw clock gating control */ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) - reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - else - reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + return; + + /* enable sw clock gating control */ + reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK | @@ -563,13 +564,14 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data; + int inst_idx = 0; + + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + return; /* enable VCN CGC */ - data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL); - if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) - data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - else - data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data); From 762702ebd324596def832a2b2a1cfd24fff78338 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 11 Jul 2022 11:06:46 -0400 Subject: [PATCH 325/861] drm/amdgpu/vcn: add vcn multiple AIDs support add vcn multiple AIDs support. 
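The conversion below is largely mechanical: each register-programming path
gains an instance index, and each entry point loops over
adev->vcn.num_vcn_inst, skipping harvested instances. A minimal sketch of
that pattern (loop shape and harvest check taken from the diff;
program_one_instance() is a hypothetical placeholder, not a function this
patch adds):

	/* Sketch only: per-instance iteration pattern used throughout this
	 * patch. program_one_instance() stands in for the real per-instance
	 * start/stop/resume helpers, which now take an inst_idx argument.
	 */
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;	/* instance is fused off, skip it */
		r = program_one_instance(adev, i);
		if (r)
			return r;
	}

The same shape appears in sw_init, sw_fini, hw_init, start and stop below.
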
v2: squash in FW setting fix (Alex) Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 806 +++++++++++++----------- 1 file changed, 432 insertions(+), 374 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index a9f06f3b00eb..8db50a50b868 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -79,9 +79,8 @@ static int vcn_v4_0_3_early_init(void *handle) static int vcn_v4_0_3_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; - int r; + int i, r; r = amdgpu_vcn_sw_init(adev); if (r) @@ -99,20 +98,27 @@ static int vcn_v4_0_3_sw_init(void *handle) if (r) return r; - ring = &adev->vcn.inst->ring_dec; - ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1); - ring->vm_hub = AMDGPU_MMHUB0(0); - sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, - AMDGPU_RING_PRIO_DEFAULT, - &adev->vcn.inst->sched_score); - if (r) - return r; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_vcn4_fw_shared *fw_shared; - fw_shared = adev->vcn.inst->fw_shared.cpu_addr; - fw_shared->present_flag_0 = 0; - fw_shared->sq.is_enabled = cpu_to_le32(true); + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ring = &adev->vcn.inst[i].ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i; + ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); + sprintf(ring->name, "vcn_dec_%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, + &adev->vcn.inst[i].sched_score); + if (r) + return r; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->present_flag_0 = 0; + fw_shared->sq.is_enabled = cpu_to_le32(true); + } if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode; @@ -130,15 +136,18 @@ static int vcn_v4_0_3_sw_init(void *handle) static int vcn_v4_0_3_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int r, idx; + int i, r, idx; if (drm_dev_enter(&adev->ddev, &idx)) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; - - fw_shared = adev->vcn.inst->fw_shared.cpu_addr; - fw_shared->present_flag_0 = 0; - fw_shared->sq.is_enabled = cpu_to_le32(false); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->present_flag_0 = 0; + fw_shared->sq.is_enabled = cpu_to_le32(false); + } drm_dev_exit(idx); } @@ -161,18 +170,30 @@ static int vcn_v4_0_3_sw_fini(void *handle) static int vcn_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; - int r; + struct amdgpu_ring *ring; + int i, r; - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); - if (ring->use_doorbell) - WREG32_SOC15(VCN, ring->me, regVCN_RB4_DB_CTRL, - ring->doorbell_index << VCN_RB4_DB_CTRL__OFFSET__SHIFT | - VCN_RB4_DB_CTRL__EN_MASK); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + ring = 
&adev->vcn.inst[i].ring_dec; - r = amdgpu_ring_test_helper(ring); + if (ring->use_doorbell) + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i, + adev->vcn.inst[i].aid_id); + if (ring->use_doorbell) + WREG32_SOC15(VCN, ring->me, regVCN_RB4_DB_CTRL, + ring->doorbell_index << VCN_RB4_DB_CTRL__OFFSET__SHIFT | + VCN_RB4_DB_CTRL__EN_MASK); + + r = amdgpu_ring_test_helper(ring); + if (r) + goto done; + } + +done: if (!r) DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully(under %s).\n", (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode"); @@ -245,10 +266,11 @@ static int vcn_v4_0_3_resume(void *handle) * vcn_v4_0_3_mc_resume - memory controller programming * * @adev: amdgpu_device pointer + * @inst_idx: instance number * * Let the VCN memory controller know it's offsets */ -static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev) +static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx) { uint32_t offset, size; const struct common_firmware_header *hdr; @@ -258,50 +280,46 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev) /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); - WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET0, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo)); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi)); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0, 0); offset = 0; } else { - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst->gpu_addr)); - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); offset = size; - if (amdgpu_emu_mode == 1) - /* No signed header here */ - WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET0, 0); - else - WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); } - WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_SIZE0, size); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0, size); /* cache window 1: stack */ - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst->gpu_addr + offset)); - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst->gpu_addr + offset)); - WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET1, 0); - WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1, 
AMDGPU_VCN_STACK_SIZE); /* cache window 2: context */ - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_OFFSET2, 0); - WREG32_SOC15(VCN, 0, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); /* non-cache window */ - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr)); - WREG32_SOC15(VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr)); - WREG32_SOC15(VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0, 0); - WREG32_SOC15(VCN, 0, regUVD_VCPU_NONCACHE_SIZE0, + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); } @@ -309,11 +327,12 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev) * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode * * @adev: amdgpu_device pointer + * @inst_idx: instance number index * @indirect: indirectly write sram * * Let the VCN memory controller know it's offsets with dpg mode */ -static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect) +static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { uint32_t offset, size; const struct common_firmware_header *hdr; @@ -324,93 +343,97 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indir /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { if (!indirect) { - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + + inst_idx].tmr_mc_addr_lo), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + + inst_idx].tmr_mc_addr_hi), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } else { - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, 
SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } offset = 0; } else { - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); offset = size; - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_OFFSET0), - 0, 0, indirect); + AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); } if (!indirect) - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect); else - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); /* cache window 1: stack */ if (!indirect) { - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), - lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), - upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } else { - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); /* cache window 2: context */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), - lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), - upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + 
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); /* non-cache window */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), - lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), - upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_NONCACHE_SIZE0), AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect); /* VCN global tiling registers */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } @@ -418,13 +441,13 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indir * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating * * @adev: amdgpu_device pointer + * @inst_idx: instance number * * Disable clock gating for VCN block */ -static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) +static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx) { uint32_t data; - int inst_idx = 0; if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) return; @@ -434,9 +457,9 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, 0, regUVD_CGC_GATE); + data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE); data &= ~(UVD_CGC_GATE__SYS_MASK | UVD_CGC_GATE__MPEG2_MASK | UVD_CGC_GATE__REGS_MASK @@ -450,10 +473,10 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) | UVD_CGC_GATE__VCPU_MASK | UVD_CGC_GATE__MMSCH_MASK); - WREG32_SOC15(VCN, 0, regUVD_CGC_GATE, data); - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_CGC_GATE, 0, 0xFFFFFFFF); + WREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE, data); + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_CGC_GATE, 0, 0xFFFFFFFF); - data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK | UVD_CGC_CTRL__MPEG2_MODE_MASK | UVD_CGC_CTRL__REGS_MODE_MASK @@ -466,9 +489,9 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) | UVD_CGC_CTRL__WCB_MODE_MASK | 
UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__MMSCH_MODE_MASK); - WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_GATE); + data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE); data |= (UVD_SUVD_CGC_GATE__SRE_MASK | UVD_SUVD_CGC_GATE__SIT_MASK | UVD_SUVD_CGC_GATE__SMP_MASK @@ -490,9 +513,9 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK | UVD_SUVD_CGC_GATE__SDB_VP9_MASK | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); - WREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_GATE, data); + WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE, data); - data = RREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL); data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK @@ -501,7 +524,7 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK | UVD_SUVD_CGC_CTRL__IME_MODE_MASK | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); - WREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data); } /** @@ -509,12 +532,13 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * @sram_sel: sram select + * @inst_idx: instance number index * @indirect: indirectly write sram * * Disable clock gating for VCN block with dpg mode */ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel, - uint8_t indirect) + int inst_idx, uint8_t indirect) { uint32_t reg_data = 0; @@ -538,19 +562,19 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, UVD_CGC_CTRL__LRBBM_MODE_MASK | UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect); /* turn off clock gating */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect); /* turn on SUVD clock gating */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); /* turn on sw mode in UVD_SUVD_CGC_CTRL */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); } @@ -558,13 +582,13 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, * vcn_v4_0_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer + * @inst_idx: instance number * * Enable clock gating for VCN block */ -static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) +static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx) { uint32_t data; - int inst_idx = 0; if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) return; @@ -574,9 +598,9 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, 0, regUVD_CGC_CTRL); + data = 
RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); data |= (UVD_CGC_CTRL__SYS_MODE_MASK | UVD_CGC_CTRL__MPEG2_MODE_MASK | UVD_CGC_CTRL__REGS_MODE_MASK @@ -588,9 +612,9 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) | UVD_CGC_CTRL__LRBBM_MODE_MASK | UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK); - WREG32_SOC15(VCN, 0, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL); data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK @@ -599,48 +623,51 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev) | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK | UVD_SUVD_CGC_CTRL__IME_MODE_MASK | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); - WREG32_SOC15(VCN, 0, regUVD_SUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data); } /** * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode * * @adev: amdgpu_device pointer + * @inst_idx: instance number index * @indirect: indirectly write sram * * Start VCN block with dpg mode */ -static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, bool indirect) +static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + volatile struct amdgpu_vcn4_fw_shared *fw_shared = + adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; /* disable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_POWER_STATUS), 1, + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1, ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* enable dynamic power gating mode */ - tmp = RREG32_SOC15(VCN, 0, regUVD_POWER_STATUS); + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS); tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; - WREG32_SOC15(VCN, 0, regUVD_POWER_STATUS, tmp); + WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp); if (indirect) - adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr; + adev->vcn.inst[inst_idx].dpg_sram_curr_addr = + (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; /* enable clock gating */ - vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, indirect); + vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect); /* enable VCPU clock */ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); /* disable master interrupt */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect); /* setup regUVD_LMI_CTRL */ @@ -652,80 +679,80 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, bool indirect) UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 0x00100000L); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_MPC_CNTL), 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 
0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_MPC_SET_MUXA0), ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_MPC_SET_MUXB0), ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_MPC_SET_MUX), ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); - vcn_v4_0_3_mc_resume_dpg_mode(adev, indirect); + vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect); tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); /* enable LMI MC and UMC channels */ tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT; - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect); /* enable master interrupt */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr)); + psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, + (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); - ring = &adev->vcn.inst->ring_dec; + ring = &adev->vcn.inst[inst_idx].ring_dec; /* program the RB_BASE for ring buffer */ - WREG32_SOC15(VCN, 0, regUVD_RB_BASE_LO4, + WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO4, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, 0, regUVD_RB_BASE_HI4, + WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI4, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, 0, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t)); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t)); /* resetting ring, fw should not check RB ring */ - tmp = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK); - WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(VCN, 0, regUVD_RB_RPTR4, 0); - WREG32_SOC15(VCN, 0, regUVD_RB_WPTR4, 0); - ring->wptr = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR4); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR4, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4, 0); + ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4); - tmp = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); tmp |= VCN_RB_ENABLE__RB4_EN_MASK; - WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, 
inst_idx, regVCN_RB_ENABLE, tmp); - WREG32_SOC15(VCN, 0, regUVD_SCRATCH2, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_SCRATCH2, 0); /*resetting done, fw can check RB ring */ fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); @@ -745,159 +772,162 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) volatile struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; - int j, k, r; + int i, j, k, r; if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, true); - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) - return vcn_v4_0_3_start_dpg_mode(adev, adev->vcn.indirect_sram); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + continue; + } - /* set VCN status busy */ - tmp = RREG32_SOC15(VCN, 0, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; - WREG32_SOC15(VCN, 0, regUVD_STATUS, tmp); + /* set VCN status busy */ + tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, i, regUVD_STATUS, tmp); - /*SW clock gating */ - vcn_v4_0_3_disable_clock_gating(adev); + /*SW clock gating */ + vcn_v4_0_3_disable_clock_gating(adev, i); - /* enable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); - /* disable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_MASTINT_EN), 0, - ~UVD_MASTINT_EN__VCPU_EN_MASK); + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); - /* enable LMI MC and UMC channels */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - tmp = RREG32_SOC15(VCN, 0, regUVD_SOFT_RESET); - tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; - tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; - WREG32_SOC15(VCN, 0, regUVD_SOFT_RESET, tmp); + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); - /* setup regUVD_LMI_CTRL */ - tmp = RREG32_SOC15(VCN, 0, regUVD_LMI_CTRL); - WREG32_SOC15(VCN, 0, regUVD_LMI_CTRL, tmp | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__MASK_MC_URGENT_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + /* setup regUVD_LMI_CTRL */ + tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL); + WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); - /* setup regUVD_MPC_CNTL */ - tmp = RREG32_SOC15(VCN, 0, regUVD_MPC_CNTL); - tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; - tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; - WREG32_SOC15(VCN, 0, regUVD_MPC_CNTL, tmp); + /* setup regUVD_MPC_CNTL */ + tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL); + tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; + tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; + WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp); - /* setup UVD_MPC_SET_MUXA0 */ - WREG32_SOC15(VCN, 0, 
regUVD_MPC_SET_MUXA0, - ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | - (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | - (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + /* setup UVD_MPC_SET_MUXA0 */ + WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); - /* setup UVD_MPC_SET_MUXB0 */ - WREG32_SOC15(VCN, 0, regUVD_MPC_SET_MUXB0, - ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | - (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | - (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); + /* setup UVD_MPC_SET_MUXB0 */ + WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); - /* setup UVD_MPC_SET_MUX */ - WREG32_SOC15(VCN, 0, regUVD_MPC_SET_MUX, - ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | - (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + /* setup UVD_MPC_SET_MUX */ + WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); - vcn_v4_0_3_mc_resume(adev); + vcn_v4_0_3_mc_resume(adev, i); - /* VCN global tiling registers */ - WREG32_SOC15(VCN, 0, regUVD_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(VCN, 0, regUVD_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); + /* VCN global tiling registers */ + WREG32_SOC15(VCN, i, regUVD_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); - /* unblock VCPU register access */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_RB_ARB_CTRL), 0, - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); - /* release VCPU reset to boot */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), 0, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + /* release VCPU reset to boot */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); - for (j = 0; j < 10; ++j) { - uint32_t status; + for (j = 0; j < 10; ++j) { + uint32_t status; - for (k = 0; k < 100; ++k) { - status = RREG32_SOC15(VCN, 0, regUVD_STATUS); + for (k = 0; k < 100; ++k) { + status = RREG32_SOC15(VCN, i, regUVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; if (status & 2) break; - if (amdgpu_emu_mode == 1) - msleep(500); - else - mdelay(10); + + DRM_DEV_ERROR(adev->dev, + "VCN decode not responding, trying to reset the VCPU!!!\n"); + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + mdelay(10); + r = -1; } - r = 0; - if (status & 2) - break; - DRM_DEV_ERROR(adev->dev, - "VCN decode not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__BLK_RST_MASK, - ~UVD_VCPU_CNTL__BLK_RST_MASK); - mdelay(10); - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), 0, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + if (r) { + DRM_DEV_ERROR(adev->dev, "VCN decode not responding, 
giving up!!!\n"); + return r; + } + + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + ring = &adev->vcn.inst[i].ring_dec; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + + /* program the RB_BASE for ring buffer */ + WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO4, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI4, + upper_32_bits(ring->gpu_addr)); + + WREG32_SOC15(VCN, i, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t)); + + /* resetting ring, fw should not check RB ring */ + tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK); + WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(VCN, i, regUVD_RB_RPTR4, 0); + WREG32_SOC15(VCN, i, regUVD_RB_WPTR4, 0); + + tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB4_EN_MASK; + WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + + ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR4); + fw_shared->sq.queue_mode &= + cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF)); - mdelay(10); - r = -1; } - - if (r) { - DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n"); - return r; - } - - /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_MASTINT_EN), - UVD_MASTINT_EN__VCPU_EN_MASK, - ~UVD_MASTINT_EN__VCPU_EN_MASK); - - /* clear the busy bit of VCN_STATUS */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), 0, - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); - - ring = &adev->vcn.inst->ring_dec; - - fw_shared = adev->vcn.inst->fw_shared.cpu_addr; - - /* program the RB_BASE for ring buffer */ - WREG32_SOC15(VCN, 0, regUVD_RB_BASE_LO4, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, 0, regUVD_RB_BASE_HI4, - upper_32_bits(ring->gpu_addr)); - - WREG32_SOC15(VCN, 0, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t)); - - /* resetting ring, fw should not check RB ring */ - tmp = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); - tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK); - WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); - - /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(VCN, 0, regUVD_RB_RPTR4, 0); - WREG32_SOC15(VCN, 0, regUVD_RB_WPTR4, 0); - - tmp = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); - tmp |= VCN_RB_ENABLE__RB4_EN_MASK; - WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, tmp); - - ring->wptr = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR4); - fw_shared->sq.queue_mode &= cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF)); - return 0; } @@ -905,32 +935,27 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode * * @adev: amdgpu_device pointer + * @inst_idx: instance number index * * Stop VCN block with dpg mode */ -static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev) +static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { uint32_t tmp; /* Wait for power status to be 1 */ - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_POWER_STATUS, 1, + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* wait for read ptr to be equal to write ptr */ - tmp = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR); - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4); + 
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR4, tmp, 0xFFFFFFFF); - tmp = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR2); - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_RB_RPTR2, tmp, 0xFFFFFFFF); - - tmp = RREG32_SOC15(VCN, 0, regUVD_RB_WPTR4); - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_RB_RPTR4, tmp, 0xFFFFFFFF); - - SOC15_WAIT_ON_RREG(VCN, 0, regUVD_POWER_STATUS, 1, + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* disable dynamic power gating mode */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_POWER_STATUS), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); return 0; } @@ -945,65 +970,68 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev) static int vcn_v4_0_3_stop(struct amdgpu_device *adev) { uint32_t tmp; - int r = 0; + int i, r = 0; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { - r = vcn_v4_0_3_stop_dpg_mode(adev); - goto Done; + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v4_0_3_stop_dpg_mode(adev, i); + goto Done; + } + + /* wait for vcn idle */ + r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); + if (r) + goto Done; + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + if (r) + goto Done; + + /* stall UMC channel */ + tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2); + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; + WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp); + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + if (r) + goto Done; + + /* Unblock VCPU Register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* release VCPU reset to boot */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + /* disable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + + /* reset LMI UMC/LMI/VCPU */ + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + + /* clear VCN status */ + WREG32_SOC15(VCN, i, regUVD_STATUS, 0); + + /* apply HW clock gating */ + vcn_v4_0_3_enable_clock_gating(adev, i); } - - /* wait for vcn idle */ - r = SOC15_WAIT_ON_RREG(VCN, 0, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); - if (r) - return r; - - tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | - UVD_LMI_STATUS__READ_CLEAN_MASK | - UVD_LMI_STATUS__WRITE_CLEAN_MASK | - UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; - r = SOC15_WAIT_ON_RREG(VCN, 0, regUVD_LMI_STATUS, tmp, tmp); - if (r) - return r; - - /* stall UMC channel */ - tmp = RREG32_SOC15(VCN, 0, regUVD_LMI_CTRL2); - tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; - WREG32_SOC15(VCN, 0, regUVD_LMI_CTRL2, tmp); - tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| - UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; - r = SOC15_WAIT_ON_RREG(VCN, 0, regUVD_LMI_STATUS, tmp, tmp); - if (r) - return r; - - /* Unblock VCPU Register access */ 
- WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_RB_ARB_CTRL), - UVD_RB_ARB_CTRL__VCPU_DIS_MASK, - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); - - /* release VCPU reset to boot */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__BLK_RST_MASK, - ~UVD_VCPU_CNTL__BLK_RST_MASK); - - /* disable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, regUVD_VCPU_CNTL), 0, - ~(UVD_VCPU_CNTL__CLK_EN_MASK)); - - /* reset LMI UMC/LMI/VCPU */ - tmp = RREG32_SOC15(VCN, 0, regUVD_SOFT_RESET); - tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; - WREG32_SOC15(VCN, 0, regUVD_SOFT_RESET, tmp); - - tmp = RREG32_SOC15(VCN, 0, regUVD_SOFT_RESET); - tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; - WREG32_SOC15(VCN, 0, regUVD_SOFT_RESET, tmp); - - /* clear VCN status */ - WREG32_SOC15(VCN, 0, regUVD_STATUS, 0); - - /* apply HW clock gating */ - vcn_v4_0_3_enable_clock_gating(adev); - Done: if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, false); @@ -1113,8 +1141,15 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_dec_sw_ring_vm_funcs = { */ static void vcn_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->vcn.inst->ring_dec.funcs = &vcn_v4_0_3_dec_sw_ring_vm_funcs; - adev->vcn.inst->ring_dec.me = 0; + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + adev->vcn.inst[i].ring_dec.funcs = &vcn_v4_0_3_dec_sw_ring_vm_funcs; + adev->vcn.inst[i].ring_dec.me = i; + adev->vcn.inst[i].aid_id = i / adev->vcn.num_inst_per_aid; + } DRM_DEV_INFO(adev->dev, "VCN decode(Software Ring) is enabled in VM mode\n"); } @@ -1128,8 +1163,15 @@ static void vcn_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) static bool vcn_v4_0_3_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 1; - return (RREG32_SOC15(VCN, 0, regUVD_STATUS) == UVD_STATUS__IDLE); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE); + } + + return ret; } /** @@ -1142,13 +1184,21 @@ static bool vcn_v4_0_3_is_idle(void *handle) static int vcn_v4_0_3_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 0; - return SOC15_WAIT_ON_RREG(VCN, 0, regUVD_STATUS, UVD_STATUS__IDLE, - UVD_STATUS__IDLE); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); + if (ret) + return ret; + } + + return ret; } -/** - * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state +/* vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state * * @handle: amdgpu_device pointer * @state: clock gating state @@ -1160,15 +1210,19 @@ static int vcn_v4_0_3_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + int i; - if (enable) { - if (RREG32_SOC15(VCN, 0, regUVD_STATUS) != UVD_STATUS__IDLE) - return -EBUSY; - vcn_v4_0_3_enable_clock_gating(adev); - } else { - vcn_v4_0_3_disable_clock_gating(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + if (enable) { + if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE) + return -EBUSY; + vcn_v4_0_3_enable_clock_gating(adev, i); + } else { + vcn_v4_0_3_disable_clock_gating(adev, i); + } } - return 0; } @@ -1231,11 +1285,15 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t i; + + i = node_id_to_phys_map[entry->node_id]; + DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n"); switch (entry->src_id) { case VCN_4_0__SRCID__UVD_TRAP: - amdgpu_fence_process(&adev->vcn.inst->ring_dec); + amdgpu_fence_process(&adev->vcn.inst[i].ring_dec); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", @@ -1266,9 +1324,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; - adev->vcn.inst->irq.num_types = 1; - adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; + adev->vcn.inst->irq.num_types++; } + adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; } static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { From 0b02364e03caecbe30bdd9db0b3e6ba0196bb0ef Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 29 Jul 2022 14:56:59 +0530 Subject: [PATCH 326/861] drm/amdgpu: Conform to SET_UCONFIG_REG spec The packet expects only a 16-bit register offset. Hence pass the register offset that is local to each XCC. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 43126f7b70ea..14b96b43f02c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -232,13 +232,14 @@ static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) { + uint32_t scratch_reg0_offset, xcc_offset; struct amdgpu_device *adev = ring->adev; uint32_t tmp = 0; unsigned i; int r; - /* scratch_reg0_offset is 32bit even with full XCD config */ - uint32_t scratch_reg0_offset; + /* Use register offset which is local to XCC in the packet */ + xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0); WREG32(scratch_reg0_offset, 0xCAFEDEAD); @@ -247,7 +248,7 @@ static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) return r; amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(ring, scratch_reg0_offset - PACKET3_SET_UCONFIG_REG_START); + amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); From 5fb34bd9cf9e248d7e84e431a4a6b731334ab564 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Tue, 24 May 2022 10:22:12 -0500 Subject: [PATCH 327/861] drm/amdkfd: pass kfd_node ref to svm migration api This work is required for GC 9.4.3 as a prerequisite for supporting memory partitions per node in SVM. When multiple partitions are configured, every BO should be allocated inside one specific partition, which corresponds to the current amdgpu_device and kfd_node.
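To make the new call flow concrete, here is a minimal sketch (illustrative only, not part of the series): a GMC interrupt handler forwards the IH client/node IDs so that the SVM layer can resolve the kfd_node that owns the faulting partition through the kfd_node_by_irq_ids() helper this patch adds to kfd_priv.h. The wrapper function and its name below are hypothetical; the amdgpu_vm_handle_fault() signature is the one introduced by this patch, and pre-GFX-9.4.3 parts simply pass client_id = node_id = 0 and resolve to dev->nodes[0].

/* Hypothetical wrapper, for illustration only; assumes the usual amdgpu
 * driver context.  entry->client_id / entry->node_id identify which
 * partition (KFD node) raised the fault on multi-partition ASICs.
 */
static bool example_handle_gpu_fault(struct amdgpu_device *adev,
				     struct amdgpu_iv_entry *entry,
				     uint64_t addr, bool write_fault)
{
	/* The IDs are threaded down so SVM can map them to the owning
	 * kfd_node (via kfd_node_by_irq_ids()) before touching any
	 * per-partition state.
	 */
	return amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id,
				      entry->node_id, addr, write_fault);
}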
v2: squash in compilation fix (Alex) v3: squash in fix for pre-gfx 9.4.3 (Alex) v4: squash in best_loc fix (Alex) Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 33 ++--- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 45 +++---- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 30 ++++- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 152 +++++++++++------------ drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 21 ++-- 9 files changed, 166 insertions(+), 133 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c3964c14f215..c390b2856cc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2441,7 +2441,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * shouldn't be reported any more. */ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - uint64_t addr, bool write_fault) + u32 client_id, u32 node_id, uint64_t addr, + bool write_fault) { bool is_compute_context = false; struct amdgpu_bo *root; @@ -2465,8 +2466,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, addr /= AMDGPU_GPU_PAGE_SIZE; - if (is_compute_context && - !svm_range_restore_pages(adev, pasid, addr, write_fault)) { + if (is_compute_context && !svm_range_restore_pages(adev, pasid, client_id, + node_id, addr, write_fault)) { amdgpu_bo_unref(&root); return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 9f5d32b0fda1..dbab31647186 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -455,7 +455,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, struct amdgpu_task_info *task_info); bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - uint64_t addr, bool write_fault); + u32 client_id, u32 node_id, uint64_t addr, + bool write_fault); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index d76f5c8d4977..01bd45651382 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -139,7 +139,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault)) return 1; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2c322a25bf1c..c5752a349f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -557,11 +557,24 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, u64 addr; uint32_t cam_index = 0; int ret; - uint32_t node_id; + uint32_t node_id = 0; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; + if (entry->client_id == SOC15_IH_CLIENTID_VMC) { + hub_name = "mmhub0"; + hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + } else
if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { + hub_name = "mmhub1"; + hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; + } else { + hub_name = "gfxhub0"; + node_id = (adev->ip_versions[GC_HWIP][0] == + IP_VERSION(9, 4, 3)) ? entry->node_id : 0; + hub = &adev->vmhub[node_id/2]; + } + if (retry_fault) { if (adev->irq.retry_cam_enabled) { /* Delegate it to a different ring if the hardware hasn't @@ -574,7 +587,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, cam_index = entry->src_data[2] & 0x3ff; - ret = amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault); + ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + addr, write_fault); WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); if (ret) return 1; @@ -596,7 +610,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + addr, write_fault)) return 1; } } @@ -604,18 +619,6 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (!printk_ratelimit()) return 0; - if (entry->client_id == SOC15_IH_CLIENTID_VMC) { - hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; - } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { - hub_name = "mmhub1"; - hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; - } else { - hub_name = "gfxhub0"; - node_id = (adev->ip_versions[GC_HWIP][0] == - IP_VERSION(9, 4, 3)) ? entry->node_id : 0; - hub = &adev->vmhub[node_id/2]; - } memset(&task_info, 0, sizeof(struct amdgpu_task_info)); amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 5f4dc2a45bd0..e7e5abc32c84 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -287,11 +287,12 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) } static int -svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, dma_addr_t *scratch, uint64_t ttm_res_offset) { - uint64_t npages = migrate->npages; + uint64_t npages = migrate->cpages; + struct amdgpu_device *adev = node->adev; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; dma_addr_t *src; @@ -321,7 +322,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, DMA_TO_DEVICE); r = dma_mapping_error(dev, src[i]); if (r) { - dev_err(adev->dev, "%s: fail %d dma_map_page\n", + dev_err(dev, "%s: fail %d dma_map_page\n", __func__, r); goto out_free_vram_pages; } @@ -390,12 +391,13 @@ out_free_vram_pages: } static long -svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, uint32_t trigger, uint64_t ttm_res_offset) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); uint64_t npages = (end - start) >> PAGE_SHIFT; + struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; @@ -445,7 +447,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, else pr_debug("0x%lx pages 
migrated\n", cpages); - r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset); + r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", @@ -465,7 +467,7 @@ out_free: kvfree(buf); out: if (!r && cpages) { - pdd = svm_range_get_pdd_by_adev(prange, adev); + pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_in, pdd->page_in + cpages); @@ -492,8 +494,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, { unsigned long addr, start, end; struct vm_area_struct *vma; - struct amdgpu_device *adev; uint64_t ttm_res_offset; + struct kfd_node *node; unsigned long cpages = 0; long r = 0; @@ -503,9 +505,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, return 0; } - adev = svm_range_get_adev_by_id(prange, best_loc); - if (!adev) { - pr_debug("failed to get device by id 0x%x\n", best_loc); + node = svm_range_get_node_by_id(prange, best_loc); + if (!node) { + pr_debug("failed to get kfd node by id 0x%x\n", best_loc); return -ENODEV; } @@ -515,9 +517,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, start = prange->start << PAGE_SHIFT; end = (prange->last + 1) << PAGE_SHIFT; - r = svm_range_vram_node_new(adev, prange, true); + r = svm_range_vram_node_new(node, prange, true); if (r) { - dev_dbg(adev->dev, "fail %ld to alloc vram\n", r); + dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r); return r; } ttm_res_offset = prange->offset << PAGE_SHIFT; @@ -530,7 +532,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, break; next = min(vma->vm_end, end); - r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset); + r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset); if (r < 0) { pr_debug("failed %ld to migrate\n", r); break; @@ -663,7 +665,7 @@ out_oom: * positive values - partial migration, number of pages not migrated */ static long -svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, uint32_t trigger, struct page *fault_page) { @@ -671,6 +673,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, uint64_t npages = (end - start) >> PAGE_SHIFT; unsigned long upages = npages; unsigned long cpages = 0; + struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; @@ -745,7 +748,7 @@ out_free: kvfree(buf); out: if (!r && cpages) { - pdd = svm_range_get_pdd_by_adev(prange, adev); + pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); } @@ -766,7 +769,7 @@ out: int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, uint32_t trigger, struct page *fault_page) { - struct amdgpu_device *adev; + struct kfd_node *node; struct vm_area_struct *vma; unsigned long addr; unsigned long start; @@ -780,13 +783,11 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, return 0; } - adev = svm_range_get_adev_by_id(prange, prange->actual_loc); - if (!adev) { - pr_debug("failed to get device by id 0x%x\n", - prange->actual_loc); + node = svm_range_get_node_by_id(prange, prange->actual_loc); + if (!node) { + pr_debug("failed to get kfd node by id 
0x%x\n", prange->actual_loc); return -ENODEV; } - pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n", prange->svms, prange, prange->start, prange->last, prange->actual_loc); @@ -805,7 +806,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, } next = min(vma->vm_end, end); - r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger, + r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger, fault_page); if (r < 0) { pr_debug("failed %ld to migrate prange %p\n", r, prange); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 400b4dcbdf05..df372de6b056 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -289,6 +289,7 @@ struct kfd_node { * from the HW ring into a SW ring. */ bool interrupts_active; + uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */ /* QCM Device instance */ struct device_queue_manager *dqm; @@ -971,9 +972,8 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id); -int kfd_process_gpuid_from_adev(struct kfd_process *p, - struct amdgpu_device *adev, uint32_t *gpuid, - uint32_t *gpuidx); +int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, + uint32_t *gpuid, uint32_t *gpuidx); static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p, uint32_t gpuidx, uint32_t *gpuid) { return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL; @@ -1073,6 +1073,30 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); +static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t client_id, + uint32_t node_id) +{ + if ((node->interrupt_bitmap & (0x1U << node_id)) || + ((node_id % 4) == 0 && + (node->interrupt_bitmap >> 16) & (0x1U << client_id))) + return true; + + return false; +} +static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, + uint32_t client_id, uint32_t node_id) { + struct kfd_dev *dev = adev->kfd.dev; + uint32_t i; + + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) + return dev->nodes[0]; + + for (i = 0; i < dev->num_nodes; i++) + if (kfd_irq_is_from_node(dev->nodes[i], client_id, node_id)) + return dev->nodes[i]; + + return NULL; +} int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index c3d43e6e5236..666815b227a8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1891,13 +1891,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id) } int -kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, - uint32_t *gpuid, uint32_t *gpuidx) +kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, + uint32_t *gpuid, uint32_t *gpuidx) { int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i] && p->pdds[i]->dev->adev == adev) { + if (p->pdds[i] && p->pdds[i]->dev == node) { *gpuid = p->pdds[i]->user_gpu_id; *gpuidx = i; return 0; diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 4b4f3bf8b823..639831fbb6ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -170,8 +170,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, page = hmm_pfn_to_page(hmm_pfns[i]); if (is_zone_device_page(page)) { - struct amdgpu_device *bo_adev = - amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); + struct amdgpu_device *bo_adev = prange->svm_bo->node->adev; addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + bo_adev->vm_manager.vram_base_offset - @@ -424,10 +423,8 @@ static void svm_range_bo_unref(struct svm_range_bo *svm_bo) } static bool -svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange) +svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange) { - struct amdgpu_device *bo_adev; - mutex_lock(&prange->lock); if (!prange->svm_bo) { mutex_unlock(&prange->lock); @@ -440,12 +437,11 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange) } if (svm_bo_ref_unless_zero(prange->svm_bo)) { /* - * Migrate from GPU to GPU, remove range from source bo_adev - * svm_bo range list, and return false to allocate svm_bo from - * destination adev. + * Migrate from GPU to GPU, remove range from source svm_bo->node + * range list, and return false to allocate svm_bo from destination + * node. */ - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - if (bo_adev != adev) { + if (prange->svm_bo->node != node) { mutex_unlock(&prange->lock); spin_lock(&prange->svm_bo->list_lock); @@ -513,7 +509,7 @@ static struct svm_range_bo *svm_range_bo_new(void) } int -svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, +svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear) { struct amdgpu_bo_param bp; @@ -528,7 +524,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms, prange->start, prange->last); - if (svm_range_validate_svm_bo(adev, prange)) + if (svm_range_validate_svm_bo(node, prange)) return 0; svm_bo = svm_range_bo_new(); @@ -542,6 +538,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, kfree(svm_bo); return -ESRCH; } + svm_bo->node = node; svm_bo->eviction_fence = amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), mm, @@ -559,7 +556,10 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, bp.type = ttm_bo_type_device; bp.resv = NULL; - r = amdgpu_bo_create_user(adev, &bp, &ubo); + /* TODO: Allocate memory from the right memory partition. 
We can sort + * out the details later, once basic memory partitioning is working + */ + r = amdgpu_bo_create_user(node->adev, &bp, &ubo); if (r) { pr_debug("failed %d to create bo\n", r); goto create_bo_failed; @@ -617,45 +617,30 @@ void svm_range_vram_node_free(struct svm_range *prange) prange->ttm_res = NULL; } -struct amdgpu_device * -svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id) +struct kfd_node * +svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id) { - struct kfd_process_device *pdd; struct kfd_process *p; - int32_t gpu_idx; + struct kfd_process_device *pdd; p = container_of(prange->svms, struct kfd_process, svms); - - gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id); - if (gpu_idx < 0) { - pr_debug("failed to get device by id 0x%x\n", gpu_id); - return NULL; - } - pdd = kfd_process_device_from_gpuidx(p, gpu_idx); + pdd = kfd_process_device_data_by_id(p, gpu_id); if (!pdd) { - pr_debug("failed to get device by idx 0x%x\n", gpu_idx); + pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id); return NULL; } - return pdd->dev->adev; + return pdd->dev; } struct kfd_process_device * -svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev) +svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node) { struct kfd_process *p; - int32_t gpu_idx, gpuid; - int r; p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx); - if (r) { - pr_debug("failed to get device id by adev %p\n", adev); - return NULL; - } - - return kfd_process_device_from_gpuidx(p, gpu_idx); + return kfd_get_process_device_data(node, p); } static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo) @@ -1148,12 +1133,18 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, } return 0; } +static bool +svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) +{ + return (node_a->adev == node_b->adev || + amdgpu_xgmi_same_hive(node_a->adev, node_b->adev)); +} static uint64_t -svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, - int domain) +svm_range_get_pte_flags(struct kfd_node *node, + struct svm_range *prange, int domain) { - struct amdgpu_device *bo_adev; + struct kfd_node *bo_node; uint32_t flags = prange->flags; uint32_t mapping_flags = 0; uint64_t pte_flags; @@ -1162,18 +1153,18 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; if (domain == SVM_RANGE_VRAM_DOMAIN) - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); + bo_node = prange->svm_bo->node; - switch (KFD_GC_VERSION(adev->kfd.dev)) { + switch (node->adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 1): if (domain == SVM_RANGE_VRAM_DOMAIN) { - if (bo_adev == adev) { + if (bo_node == node) { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; } else { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + if (svm_nodes_in_same_hive(node, bo_node)) snoop = true; } } else { @@ -1183,15 +1174,15 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, break; case IP_VERSION(9, 4, 2): if (domain == SVM_RANGE_VRAM_DOMAIN) { - if (bo_adev == adev) { + if (bo_node == node) { mapping_flags |= coherent ? 
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; - if (adev->gmc.xgmi.connected_to_cpu) + if (node->adev->gmc.xgmi.connected_to_cpu) snoop = true; } else { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + if (svm_nodes_in_same_hive(node, bo_node)) snoop = true; } } else { @@ -1207,7 +1198,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, if (uncached) mapping_flags |= AMDGPU_VM_MTYPE_UC; /* local HBM region close to partition*/ - else if (bo_adev == adev) + else if (bo_node == node) mapping_flags |= AMDGPU_VM_MTYPE_RW; /* local HBM region far from partition or remote XGMI GPU or * system memory @@ -1231,7 +1222,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM; pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; - pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags); + pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags); return pte_flags; } @@ -1338,7 +1329,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n", last_start, prange->start + i, last_domain ? "GPU" : "CPU"); - pte_flags = svm_range_get_pte_flags(adev, prange, last_domain); + pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain); if (readonly) pte_flags &= ~AMDGPU_PTE_WRITEABLE; @@ -1347,6 +1338,9 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, pte_flags); + /* TODO: we still need to determine the vm_manager.vram_base_offset based on + * the memory partition. + */ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, pte_flags, @@ -1384,16 +1378,14 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, unsigned long *bitmap, bool wait, bool flush_tlb) { struct kfd_process_device *pdd; - struct amdgpu_device *bo_adev; + struct amdgpu_device *bo_adev = NULL; struct kfd_process *p; struct dma_fence *fence = NULL; uint32_t gpuidx; int r = 0; if (prange->svm_bo && prange->ttm_res) - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - else - bo_adev = NULL; + bo_adev = prange->svm_bo->node->adev; p = container_of(prange->svms, struct kfd_process, svms); for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { @@ -2526,17 +2518,17 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, */ static int32_t svm_range_best_restore_location(struct svm_range *prange, - struct amdgpu_device *adev, + struct kfd_node *node, int32_t *gpuidx) { - struct amdgpu_device *bo_adev, *preferred_adev; + struct kfd_node *bo_node, *preferred_node; struct kfd_process *p; uint32_t gpuid; int r; p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx); + r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx); if (r < 0) { pr_debug("failed to get gpuid from kgd\n"); return -1; @@ -2546,9 +2538,8 @@ svm_range_best_restore_location(struct svm_range *prange, prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { - preferred_adev = svm_range_get_adev_by_id(prange, - prange->preferred_loc); - if (amdgpu_xgmi_same_hive(adev, preferred_adev)) + preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc); + if (preferred_node && 
svm_nodes_in_same_hive(node, preferred_node)) return prange->preferred_loc; /* fall through */ } @@ -2560,8 +2551,8 @@ svm_range_best_restore_location(struct svm_range *prange, if (!prange->actual_loc) return 0; - bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc); - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + bo_node = svm_range_get_node_by_id(prange, prange->actual_loc); + if (bo_node && svm_nodes_in_same_hive(node, bo_node)) return prange->actual_loc; else return 0; @@ -2678,7 +2669,7 @@ svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, } static struct -svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, +svm_range *svm_range_create_unregistered_range(struct kfd_node *node, struct kfd_process *p, struct mm_struct *mm, int64_t addr) @@ -2713,7 +2704,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, pr_debug("Failed to create prange in address [0x%llx]\n", addr); return NULL; } - if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) { + if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) { pr_debug("failed to get gpuid from kgd\n"); svm_range_free(prange, true); return NULL; @@ -2767,7 +2758,7 @@ static bool svm_range_skip_recover(struct svm_range *prange) } static void -svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, +svm_range_count_fault(struct kfd_node *node, struct kfd_process *p, int32_t gpuidx) { struct kfd_process_device *pdd; @@ -2780,7 +2771,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, uint32_t gpuid; int r; - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx); + r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx); if (r < 0) return; } @@ -2808,6 +2799,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, + uint32_t client_id, uint32_t node_id, uint64_t addr, bool write_fault) { struct mm_struct *mm = NULL; @@ -2815,6 +2807,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, struct svm_range *prange; struct kfd_process *p; ktime_t timestamp = ktime_get_boottime(); + struct kfd_node *node; int32_t best_loc; int32_t gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; @@ -2858,6 +2851,13 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, goto out; } + node = kfd_node_by_irq_ids(adev, node_id, client_id); + if (!node) { + pr_debug("kfd node does not exist node_id: %d, client_id: %d\n", node_id, + client_id); + r = -EFAULT; + goto out; + } mmap_read_lock(mm); retry_write_locked: mutex_lock(&svms->lock); @@ -2876,7 +2876,7 @@ retry_write_locked: write_locked = true; goto retry_write_locked; } - prange = svm_range_create_unregistered_range(adev, p, mm, addr); + prange = svm_range_create_unregistered_range(node, p, mm, addr); if (!prange) { pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n", svms, addr); @@ -2891,7 +2891,7 @@ retry_write_locked: mutex_lock(&prange->migrate_mutex); if (svm_range_skip_recover(prange)) { - amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); r = 0; goto out_unlock_range; } @@ -2922,7 +2922,7 @@ retry_write_locked: goto out_unlock_range; } - best_loc = svm_range_best_restore_location(prange, adev, &gpuidx); + best_loc = svm_range_best_restore_location(prange, node, &gpuidx); if (best_loc == -1) { pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n", 
svms, prange->start, prange->last); @@ -2981,7 +2981,7 @@ out_unlock_svms: mutex_unlock(&svms->lock); mmap_read_unlock(mm); - svm_range_count_fault(adev, p, gpuidx); + svm_range_count_fault(node, p, gpuidx); mmput(mm); out: @@ -2989,7 +2989,7 @@ out: if (r == -EAGAIN) { pr_debug("recover vm fault later\n"); - amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); r = 0; } return r; @@ -3231,7 +3231,7 @@ svm_range_best_prefetch_location(struct svm_range *prange) DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); uint32_t best_loc = prange->prefetch_loc; struct kfd_process_device *pdd; - struct amdgpu_device *bo_adev; + struct kfd_node *bo_node; struct kfd_process *p; uint32_t gpuidx; @@ -3240,9 +3240,9 @@ svm_range_best_prefetch_location(struct svm_range *prange) if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) goto out; - bo_adev = svm_range_get_adev_by_id(prange, best_loc); - if (!bo_adev) { - WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc); + bo_node = svm_range_get_node_by_id(prange, best_loc); + if (!bo_node) { + WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc); best_loc = 0; goto out; } @@ -3260,10 +3260,10 @@ svm_range_best_prefetch_location(struct svm_range *prange) continue; } - if (pdd->dev->adev == bo_adev) + if (pdd->dev->adev == bo_node->adev) continue; - if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { + if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) { best_loc = 0; break; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 7a33b93f9df6..a165c73b40b2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -48,6 +48,7 @@ struct svm_range_bo { struct work_struct eviction_work; uint32_t evicting; struct work_struct release_work; + struct kfd_node *node; }; enum svm_work_list_ops { @@ -163,16 +164,17 @@ int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start, struct svm_range *svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, struct svm_range **parent); -struct amdgpu_device *svm_range_get_adev_by_id(struct svm_range *prange, - uint32_t id); -int svm_range_vram_node_new(struct amdgpu_device *adev, - struct svm_range *prange, bool clear); +struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, + uint32_t gpu_id); +int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, + bool clear); void svm_range_vram_node_free(struct svm_range *prange); int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, unsigned long addr, struct svm_range *parent, struct svm_range *prange); -int svm_range_restore_pages(struct amdgpu_device *adev, - unsigned int pasid, uint64_t addr, bool write_fault); +int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, + uint32_t client_id, uint32_t node_id, uint64_t addr, + bool write_fault); int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence); void svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, struct mm_struct *mm, @@ -192,7 +194,7 @@ int kfd_criu_restore_svm(struct kfd_process *p, uint64_t max_priv_data_size); int kfd_criu_resume_svm(struct kfd_process *p); struct kfd_process_device * -svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev); +svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node); void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct 
mm_struct *mm); /* SVM API and HMM page migration work together, device memory type @@ -219,8 +221,9 @@ static inline void svm_range_list_fini(struct kfd_process *p) } static inline int svm_range_restore_pages(struct amdgpu_device *adev, - unsigned int pasid, uint64_t addr, - bool write_fault) + unsigned int pasid, + uint32_t client_id, uint32_t node_id, + uint64_t addr, bool write_fault) { return -EFAULT; } From cd321e6fd611db983fb7cdb52daf089fabe4f9c7 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 8 Aug 2022 11:20:36 +0530 Subject: [PATCH 328/861] drm/amdgpu: Use status register for partition mode Program the partition status register to reflect the current partition mode. The partition capability register only advertises capability and is a one-time setting. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 28 +++++++++++--------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index bcd3e08d01cd..ef0b557e9b3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -418,20 +418,13 @@ static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev, static enum amdgpu_gfx_partition nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev) { - u32 tmp; + u32 tmp, px; - tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP); + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS); + px = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_STATUS, + PARTITION_MODE); - if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, SPX_SUPPORT)) - return AMDGPU_SPX_PARTITION_MODE; - else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, DPX_SUPPORT)) - return AMDGPU_DPX_PARTITION_MODE; - else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, TPX_SUPPORT)) - return AMDGPU_TPX_PARTITION_MODE; - else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, CPX_SUPPORT)) - return AMDGPU_CPX_PARTITION_MODE; - else - return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + return ffs(px); } static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev, @@ -439,11 +432,14 @@ static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev, { u32 tmp; - tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP); - tmp &= ~0x1f; - tmp |= 1 << mode; + /* Each bit represents DPX,TPX,QPX,CPX mode. No bit set means default + * SPX mode. + */ + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS); + tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_STATUS, + PARTITION_MODE, mode ? BIT(mode - 1) : mode); - WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP, tmp); + WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS, tmp); } const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { From d6e924ad85a0cebc9e39eb956a23386ce32cc9f9 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 9 Aug 2022 14:56:53 -0400 Subject: [PATCH 329/861] drm/amdkfd: Update SMI events for GFX9.4.3 On GFX 9.4.3, there can be multiple KFD nodes. As a result, SMI events for SVM and queue evict/restore should be raised for each node independently.
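As a rough sketch of the effect (illustrative only, not part of the series): each kfd_process_device hangs off a struct kfd_node, so the reworked event helpers receive the owning node and report that node's id instead of the fixed dev->nodes[0]->id. The loop below mirrors kfd_process_evict_queues() from this patch; the wrapper function and its name are hypothetical.

/* Hypothetical example: report queue evictions per partition.
 * kfd_smi_event_queue_eviction() takes a struct kfd_node * after this
 * patch, so each event carries the id of the node (pdd->dev->id) that
 * actually owns the queue.
 */
static void example_report_evictions(struct kfd_process *p, uint32_t trigger)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		/* pdd->dev is a struct kfd_node *, one per partition */
		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
					     trigger);
	}
}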
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 16 ++++----- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 6 ++-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 38 ++++++++++----------- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 14 ++++---- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 ++-- 5 files changed, 40 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index e7e5abc32c84..42e599912e52 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -423,9 +423,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, migrate.dst = migrate.src + npages; scratch = (dma_addr_t *)(migrate.dst + npages); - kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, + kfd_smi_event_migration_start(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->nodes[0]->id, prange->prefetch_loc, + 0, node->id, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -456,9 +456,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, + kfd_smi_event_migration_end(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->nodes[0]->id, trigger); + 0, node->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -702,9 +702,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, migrate.fault_page = fault_page; scratch = (dma_addr_t *)(migrate.dst + npages); - kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, + kfd_smi_event_migration_start(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->nodes[0]->id, 0, prange->prefetch_loc, + node->id, 0, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -738,9 +738,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, + kfd_smi_event_migration_end(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->nodes[0]->id, 0, trigger); + node->id, 0, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 666815b227a8..a6ff57f11472 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1817,7 +1817,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_eviction(pdd->dev->kfd, p->lead_thread->pid, + kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid, trigger); r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, @@ -1845,7 +1845,7 @@ fail: if (n_evicted == 0) break; - kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd)) @@ -1866,7 +1866,7 @@ int kfd_process_restore_queues(struct kfd_process *p) for (i = 0; i < p->n_pdds; i++) { struct 
kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index b703da59e067..d9953c2b2661 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -250,58 +250,58 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) task_info.pid, task_info.task_name); } -void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid, unsigned long address, bool write_fault, ktime_t ts) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_START, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START, "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, dev->nodes[0]->id, write_fault ? 'W' : 'R'); + address, node->id, write_fault ? 'W' : 'R'); } -void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid, unsigned long address, bool migration) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_END, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END, "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, dev->nodes[0]->id, migration ? 'M' : 'U'); + pid, address, node->id, migration ? 'M' : 'U'); } -void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_START, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START, "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, prefetch_loc, preferred_loc, trigger); } -void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_END, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END, "%lld -%d @%lx(%lx) %x->%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, trigger); } -void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, uint32_t trigger) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_EVICTION, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION, "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - dev->nodes[0]->id, trigger); + node->id, trigger); } -void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid) +void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_RESTORE, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE, "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - dev->nodes[0]->id); + node->id); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -324,13 +324,13 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) kfd_unref_process(p); } -void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, 
+void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_UNMAP_FROM_GPU, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU, "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, dev->nodes[0]->id, trigger); + pid, address, last - address + 1, node->id, trigger); } int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 59cd089f80d1..fa95c2dfd587 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -29,24 +29,24 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid); void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask); void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset); -void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid, unsigned long address, bool write_fault, ktime_t ts); -void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid, unsigned long address, bool migration); -void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger); -void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger); -void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, uint32_t trigger); -void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid); +void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid); void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm); -void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 639831fbb6ca..0dafbbe954ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1274,7 +1274,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, return -EINVAL; } - kfd_smi_event_unmap_from_gpu(pdd->dev->kfd, p->lead_thread->pid, + kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid, start, last, trigger); r = svm_range_unmap_from_gpu(pdd->dev->adev, @@ -2934,7 +2934,7 @@ retry_write_locked: svms, prange->start, prange->last, best_loc, prange->actual_loc); - kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr, + kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr, write_fault, timestamp); if (prange->actual_loc != best_loc) { @@ -2972,7 +2972,7 @@ retry_write_locked: pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", r, svms, prange->start, prange->last); - kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr, + kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, 
migration); out_unlock_range: From 58bd8023752f3d7e6888d5cbb27387853b04c431 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 24 Aug 2022 17:41:47 +0800 Subject: [PATCH 330/861] drm/amdgpu: adjust s2a entry register for sdma doorbell trans decoding Use the s2a entry 5/6 registers to decode sdma doorbell transactions on different AIDs, which aligns with the entry table in the SHUB spec, and leave entry 4 dedicated to the VCN doorbell to avoid conflicts. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 32 +++++++------------------- 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index ef0b557e9b3e..5dbab8ab9b27 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -151,18 +151,10 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x8); - if (aid_id != 0) - WREG32_PCIE_EXT((SOC15_REG_OFFSET(NBIO, 0, - regS2A_DOORBELL_ENTRY_3_CTRL) - + S2A_DOORBELL_REG_LSD_OFFSET) * 4 - + AMDGPU_SMN_TARGET_AID(aid_id) - + AMDGPU_SMN_CROSS_AID * !!aid_id, - doorbell_ctrl); - else - WREG32(SOC15_REG_OFFSET(NBIO, 0, - regS2A_DOORBELL_ENTRY_5_CTRL) - + S2A_DOORBELL_REG_LSD_OFFSET, - doorbell_ctrl); + WREG32_PCIE_EXT(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL) * 4 + + AMDGPU_SMN_TARGET_AID(aid_id) + + AMDGPU_SMN_CROSS_AID * !!aid_id, + doorbell_ctrl); break; case 3: WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4) + @@ -178,18 +170,10 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x9); - if (aid_id != 0) - WREG32_PCIE_EXT((SOC15_REG_OFFSET(NBIO, 0, - regS2A_DOORBELL_ENTRY_4_CTRL) - + S2A_DOORBELL_REG_LSD_OFFSET) * 4 - + AMDGPU_SMN_TARGET_AID(aid_id) - + AMDGPU_SMN_CROSS_AID * !!aid_id, - doorbell_ctrl); - else - WREG32(SOC15_REG_OFFSET(NBIO, 0, - regS2A_DOORBELL_ENTRY_6_CTRL) - + S2A_DOORBELL_REG_LSD_OFFSET, - doorbell_ctrl); + WREG32_PCIE_EXT(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_6_CTRL) * 4 + + AMDGPU_SMN_TARGET_AID(aid_id) + + AMDGPU_SMN_CROSS_AID * !!aid_id, + doorbell_ctrl); break; default: break; From 9cd51d53695e1df134301c1bdb8a8f965506e35b Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 25 Aug 2022 15:51:43 +0800 Subject: [PATCH 331/861] drm/amdgpu: drop redundant csb init for gfx943 The clear state buffer (CSB) init is not required for the compute pipeline, and on emulation it causes a soft lockup because the writes take so long.
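For orientation, the rlc_init path that remains after this cleanup is tiny; a sketch, assuming the return path surrounding the hunk below stays as-is:

        static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
        {
                /* no clear-state buffer setup any more: CSB only serves the
                 * graphics pipeline, which GC 9.4.3 does not expose */

                /* init spm vmid with 0xf */
                if (adev->gfx.rlc.funcs->update_spm_vmid)
                        adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

                return 0;
        }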
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 99 ------------------------- 1 file changed, 99 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 14b96b43f02c..73f652ad5b00 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -429,75 +429,6 @@ static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev) return r; } -static u32 gfx_v9_4_3_get_csb_size(struct amdgpu_device *adev) -{ - u32 count = 0; - const struct cs_section_def *sect = NULL; - const struct cs_extent_def *ext = NULL; - - /* begin clear state */ - count += 2; - /* context control state */ - count += 3; - - for (sect = gfx9_cs_data; sect->section != NULL; ++sect) { - for (ext = sect->section; ext->extent != NULL; ++ext) { - if (sect->id == SECT_CONTEXT) - count += 2 + ext->reg_count; - else - return 0; - } - } - - /* end clear state */ - count += 2; - /* clear state */ - count += 2; - - return count; -} - -static void gfx_v9_4_3_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) -{ - u32 count = 0, i; - const struct cs_section_def *sect = NULL; - const struct cs_extent_def *ext = NULL; - - if (adev->gfx.rlc.cs_data == NULL) - return; - if (buffer == NULL) - return; - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); - buffer[count++] = cpu_to_le32(0x80000000); - buffer[count++] = cpu_to_le32(0x80000000); - - for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { - for (ext = sect->section; ext->extent != NULL; ++ext) { - if (sect->id == SECT_CONTEXT) { - buffer[count++] = - cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); - buffer[count++] = cpu_to_le32(ext->reg_index - - PACKET3_SET_CONTEXT_REG_START); - for (i = 0; i < ext->reg_count; i++) - buffer[count++] = cpu_to_le32(ext->extent[i]); - } else { - return; - } - } - } - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); - buffer[count++] = cpu_to_le32(0); -} - static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -1112,22 +1043,8 @@ static void gfx_v9_4_3_enable_save_restore_machine(struct amdgpu_device *adev, WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1); } -static void gfx_v9_4_3_init_csb(struct amdgpu_device *adev, int xcc_id) -{ - adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); - /* csib */ - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI), - adev->gfx.rlc.clear_state_gpu_addr >> 32); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO), - adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_CSIB_LENGTH), - adev->gfx.rlc.clear_state_size); -} - static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev, int xcc_id) { - gfx_v9_4_3_init_csb(adev, xcc_id); - /* * Rlc save restore list is workable since v2_1. * And it's needed by gfxoff feature. 
@@ -1219,20 +1136,6 @@ static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) { - const struct cs_section_def *cs_data; - int r; - - adev->gfx.rlc.cs_data = gfx9_cs_data; - - cs_data = adev->gfx.rlc.cs_data; - - if (cs_data) { - /* init clear state block */ - r = amdgpu_gfx_rlc_init_csb(adev); - if (r) - return r; - } - /* init spm vmid with 0xf */ if (adev->gfx.rlc.funcs->update_spm_vmid) adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); @@ -2406,8 +2309,6 @@ static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { .set_safe_mode = gfx_v9_4_3_set_safe_mode, .unset_safe_mode = gfx_v9_4_3_unset_safe_mode, .init = gfx_v9_4_3_rlc_init, - .get_csb_size = gfx_v9_4_3_get_csb_size, - .get_csb_buffer = gfx_v9_4_3_get_csb_buffer, .resume = gfx_v9_4_3_rlc_resume, .stop = gfx_v9_4_3_rlc_stop, .reset = gfx_v9_4_3_rlc_reset, From a058799923af7984552c6c07d6bbb088c1537e6a Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Tue, 12 Apr 2022 15:37:15 -0400 Subject: [PATCH 332/861] drm/amdgpu: Fix SWS on multi-XCD GPU GFX_9_4_3 supports multiple XCDs and AIDs in one GPU device. SWS needs to program IH_VMID_x_LUT with the specified XCC instance and the corresponding AID instance. Signed-off-by: Amber Lin Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 31 +++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 772774615cb8..58adbf2262cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -21,14 +21,13 @@ */ #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_amdkfd_arcturus.h" #include "amdgpu_amdkfd_gfx_v9.h" #include "gc/gc_9_4_3_offset.h" #include "gc/gc_9_4_3_sh_mask.h" #include "athub/athub_1_8_0_offset.h" #include "athub/athub_1_8_0_sh_mask.h" -#include "oss/osssys_4_0_offset.h" -#include "oss/osssys_4_0_sh_mask.h" +#include "oss/osssys_4_4_2_offset.h" +#include "oss/osssys_4_4_2_sh_mask.h" #include "v9_structs.h" #include "soc15.h" #include "sdma/sdma_4_4_2_offset.h" @@ -220,9 +219,12 @@ int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, } static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, - u32 pasid, unsigned int vmid, uint32_t inst) + u32 pasid, unsigned int vmid, uint32_t xcc_inst) { unsigned long timeout; + unsigned int reg; + /* Every two XCCs share one AID */ + unsigned int aid = xcc_inst / 2; /* * We have to assume that there is no outstanding mapping. @@ -234,11 +236,11 @@ static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, uint32_t pasid_mapping = (pasid == 0) ?
0 : (uint32_t)pasid | ATC_VMID0_PASID_MAPPING__VALID_MASK; - WREG32(SOC15_REG_OFFSET(ATHUB, inst, + WREG32(SOC15_REG_OFFSET(ATHUB, 0, regATC_VMID0_PASID_MAPPING) + vmid, pasid_mapping); timeout = jiffies + msecs_to_jiffies(10); - while (!(RREG32(SOC15_REG_OFFSET(ATHUB, inst, + while (!(RREG32(SOC15_REG_OFFSET(ATHUB, 0, regATC_VMID_PASID_MAPPING_UPDATE_STATUS)) & (1U << vmid))) { if (time_after(jiffies, timeout)) { @@ -248,14 +250,25 @@ static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, cpu_relax(); } - WREG32(SOC15_REG_OFFSET(ATHUB, inst, + WREG32(SOC15_REG_OFFSET(ATHUB, 0, regATC_VMID_PASID_MAPPING_UPDATE_STATUS), 1U << vmid); - WREG32(SOC15_REG_OFFSET(OSSSYS, inst, mmIH_VMID_0_LUT) + vmid, + reg = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX)); + /* Every 4 numbers is a cycle. 1st is AID, 2nd and 3rd are XCDs, + * and the 4th is reserved. Therefore "aid * 4 + (xcc_inst % 2) + 1" + * programs _LUT for XCC and "aid * 4" for AID where the XCC connects + * to. + */ + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), + aid * 4 + (xcc_inst % 2) + 1); + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid, pasid_mapping); - WREG32(SOC15_REG_OFFSET(OSSSYS, inst, mmIH_VMID_0_LUT_MM) + vmid, + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), + aid * 4); + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid, pasid_mapping); + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), reg); return 0; } From 02ee3b02d77661c593c7b0e49f5baa4aa0974024 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 13 Sep 2022 15:13:18 -0400 Subject: [PATCH 333/861] drm/amdkfd: Use instance table for GFX 9.4.3 For GFX 9.4.3, use the logical to physical mapping table, to get the correct XCD instance when accessing registers on bare metal. Signed-off-by: Mukul Joshi Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 5 +- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 60 +++++++++---------- 2 files changed, 33 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 58adbf2262cb..81dfbe39fd8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -223,8 +223,9 @@ static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, { unsigned long timeout; unsigned int reg; + unsigned int phy_inst = GET_INST(GC, xcc_inst); /* Every two XCCs share one AID */ - unsigned int aid = xcc_inst / 2; + unsigned int aid = phy_inst / 2; /* * We have to assume that there is no outstanding mapping. @@ -261,7 +262,7 @@ static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, * to. 
*/ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), - aid * 4 + (xcc_inst % 2) + 1); + aid * 4 + (phy_inst % 2) + 1); WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid, pasid_mapping); WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 7918a00cbb5a..9fa9aab22fe9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -50,12 +50,12 @@ static void kgd_gfx_v9_lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint3 uint32_t queue, uint32_t vmid, uint32_t inst) { mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, mec, pipe, queue, vmid, inst); + soc15_grbm_select(adev, mec, pipe, queue, vmid, GET_INST(GC, inst)); } static void kgd_gfx_v9_unlock_srbm(struct amdgpu_device *adev, uint32_t inst) { - soc15_grbm_select(adev, 0, 0, 0, 0, inst); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst)); mutex_unlock(&adev->srbm_mutex); } @@ -90,8 +90,8 @@ void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi { kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmSH_MEM_CONFIG), sh_mem_config); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmSH_MEM_BASES), sh_mem_bases); + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG), sh_mem_config); + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_BASES), sh_mem_bases); /* APE1 no longer exists on GFX9 */ kgd_gfx_v9_unlock_srbm(adev, inst); @@ -167,7 +167,7 @@ int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, kgd_gfx_v9_lock_srbm(adev, mec, pipe, 0, 0, inst); - WREG32_SOC15(GC, inst, mmCPC_INT_CNTL, + WREG32_SOC15(GC, GET_INST(GC, inst), mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); @@ -234,17 +234,17 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; - hqd_base = SOC15_REG_OFFSET(GC, inst, mmCP_MQD_BASE_ADDR); + hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR); for (reg = hqd_base; - reg <= SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI); reg++) + reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++) WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); /* Activate doorbell logic before triggering WPTR poll. 
*/ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_DOORBELL_CONTROL), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL), data); if (wptr) { @@ -274,25 +274,25 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_LO), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO), lower_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI), upper_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR), lower_32_bits((uintptr_t)wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); - WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, + WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1, (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_EOP_RPTR), + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR), REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_ACTIVE), data); + WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE), data); kgd_gfx_v9_release_queue(adev, inst); @@ -370,8 +370,8 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); - for (reg = SOC15_REG_OFFSET(GC, inst, mmCP_MQD_BASE_ADDR); - reg <= SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI); reg++) + for (reg = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR); + reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); kgd_gfx_v9_release_queue(adev, inst); @@ -491,13 +491,13 @@ bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, uint32_t low, high; kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); - act = RREG32_SOC15(GC, inst, mmCP_HQD_ACTIVE); + act = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); high = upper_32_bits(queue_address >> 8); - if (low == RREG32_SOC15(GC, inst, mmCP_HQD_PQ_BASE) && - high == RREG32_SOC15(GC, inst, mmCP_HQD_PQ_BASE_HI)) + if (low == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE) && + high == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI)) retval = true; } kgd_gfx_v9_release_queue(adev, inst); @@ -538,7 +538,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); if (m->cp_hqd_vmid == 0) - WREG32_FIELD15_RLC(GC, inst, RLC_CP_SCHEDULERS, scheduler1, 0); + WREG32_FIELD15_RLC(GC, GET_INST(GC, inst), RLC_CP_SCHEDULERS, scheduler1, 0); switch (reset_type) { case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: @@ -555,11 +555,11 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, break; } - WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_DEQUEUE_REQUEST), type); + WREG32_RLC(SOC15_REG_OFFSET(GC, 
GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST), type); end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { - temp = RREG32_SOC15(GC, inst, mmCP_HQD_ACTIVE); + temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE); if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; if (time_after(jiffies, end_jiffies)) { @@ -633,8 +633,8 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); - WREG32_SOC15_RLC_SHADOW(GC, inst, mmGRBM_GFX_INDEX, gfx_index_val); - WREG32_SOC15(GC, inst, mmSQ_CMD, sq_cmd); + WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, gfx_index_val); + WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_CMD, sq_cmd); data = REG_SET_FIELD(data, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); @@ -643,7 +643,7 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - WREG32_SOC15_RLC_SHADOW(GC, inst, mmGRBM_GFX_INDEX, data); + WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, data); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -842,17 +842,17 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, /* * Program TBA registers */ - WREG32_SOC15(GC, inst, mmSQ_SHADER_TBA_LO, + WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_LO, lower_32_bits(tba_addr >> 8)); - WREG32_SOC15(GC, inst, mmSQ_SHADER_TBA_HI, + WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_HI, upper_32_bits(tba_addr >> 8)); /* * Program TMA registers */ - WREG32_SOC15(GC, inst, mmSQ_SHADER_TMA_LO, + WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_LO, lower_32_bits(tma_addr >> 8)); - WREG32_SOC15(GC, inst, mmSQ_SHADER_TMA_HI, + WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_HI, upper_32_bits(tma_addr >> 8)); kgd_gfx_v9_unlock_srbm(adev, inst); From cb30544e3cadf2164a123859519521dc474d21eb Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Thu, 29 Sep 2022 12:07:49 -0400 Subject: [PATCH 334/861] drm/amdgpu: Fix failure when switching to DPX mode Fix the if condition which causes dynamic repartitioning to fail when trying to switch to DPX mode. Signed-off-by: Mukul Joshi Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 69bac5b801ce..2f6d85090b55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1216,7 +1216,11 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, if (!strncasecmp("SPX", buf, strlen("SPX"))) { mode = AMDGPU_SPX_PARTITION_MODE; } else if (!strncasecmp("DPX", buf, strlen("DPX"))) { - if (num_xcc != 4 || num_xcc != 8) + /* + * DPX mode needs the number of AIDs to be a multiple of 2. + * Each AID connects 2 XCCs. + */ + if (num_xcc % 4) return -EINVAL; mode = AMDGPU_DPX_PARTITION_MODE; } else if (!strncasecmp("TPX", buf, strlen("TPX"))) { From f5fe7edfd6ce62cd23fbd707e7f9fe0f56a45e94 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 30 Sep 2022 09:16:21 -0400 Subject: [PATCH 335/861] drm/amdkfd: Update interrupt handling for GFX9.4.3 Update interrupt handling in CPX mode for GFX9.4.3 by using the VMID space instead of the SDMA client id to determine if an interrupt should be processed by a KFD node. This is especially needed for handling retry faults from MMHUB.
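The new node-matching rule is easiest to read as two bitmap tests; this restates the kfd_priv.h hunk below, nothing more:

        /* an IH entry belongs to this KFD node only when both the node id
         * from the IH cookie and the faulting VMID are owned by the node */
        static inline bool kfd_irq_is_from_node(struct kfd_node *node,
                                                uint32_t node_id, uint32_t vmid)
        {
                return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
                       (node->compute_vmid_bitmap & (1 << vmid)) != 0;
        }

Matching on the VMID works here because each KFD node carries its own compute_vmid_bitmap, so the VMID together with the node id is enough to pick the owning partition.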
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 16 ++++++---------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++++---- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 2 +- 6 files changed, 19 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c390b2856cc9..22a900f298f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2434,6 +2434,9 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * amdgpu_vm_handle_fault - graceful handling of VM faults. * @adev: amdgpu device pointer * @pasid: PASID of the VM + * @vmid: VMID, only used for GFX 9.4.3. + * @node_id: Node_id received in IH cookie. Only applicable for + * GFX 9.4.3. * @addr: Address of the fault * @write_fault: true is write fault, false is read fault * @@ -2441,7 +2444,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * shouldn't be reported any more. */ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - u32 client_id, u32 node_id, uint64_t addr, + u32 vmid, u32 node_id, uint64_t addr, bool write_fault) { bool is_compute_context = false; @@ -2466,7 +2469,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, addr /= AMDGPU_GPU_PAGE_SIZE; - if (is_compute_context && !svm_range_restore_pages(adev, pasid, client_id, + if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid, node_id, addr, write_fault)) { amdgpu_bo_unref(&root); return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index dbab31647186..8add5f5eb92a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -455,7 +455,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, struct amdgpu_task_info *task_info); bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - u32 client_id, u32 node_id, uint64_t addr, + u32 vmid, u32 node_id, uint64_t addr, bool write_fault); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c5752a349f3d..f2814270da40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -587,7 +587,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, cam_index = entry->src_data[2] & 0x3ff; - ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, addr, write_fault); WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); if (ret) @@ -610,7 +610,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, addr, write_fault)) return 1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index df372de6b056..fb3cf2c51da8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1073,18 +1073,14 @@ struct kfd_topology_device 
*kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); -static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t client_id, - uint32_t node_id) +static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id, + uint32_t vmid) { - if ((node->interrupt_bitmap & (0x1U << node_id)) || - ((node_id % 4) == 0 && - (node->interrupt_bitmap >> 16) & (0x1U << client_id))) - return true; - - return false; + return (node->interrupt_bitmap & (1 << node_id)) != 0 && + (node->compute_vmid_bitmap & (1 << vmid)) != 0; } static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, - uint32_t client_id, uint32_t node_id) { + uint32_t node_id, uint32_t vmid) { struct kfd_dev *dev = adev->kfd.dev; uint32_t i; @@ -1092,7 +1088,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, return dev->nodes[0]; for (i = 0; i < dev->num_nodes; i++) - if (kfd_irq_is_from_node(dev->nodes[i], client_id, node_id)) + if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid)) return dev->nodes[i]; return NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 0dafbbe954ca..5d6e02559d8e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2799,7 +2799,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, - uint32_t client_id, uint32_t node_id, + uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault) { struct mm_struct *mm = NULL; @@ -2851,10 +2851,10 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, goto out; } - node = kfd_node_by_irq_ids(adev, node_id, client_id); + node = kfd_node_by_irq_ids(adev, node_id, vmid); if (!node) { - pr_debug("kfd node does not exist node_id: %d, client_id: %d\n", node_id, - client_id); + pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id, + vmid); r = -EFAULT; goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index a165c73b40b2..5116786718b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -173,7 +173,7 @@ int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, unsigned long addr, struct svm_range *parent, struct svm_range *prange); int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, - uint32_t client_id, uint32_t node_id, uint64_t addr, + uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault); int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence); void svm_range_add_list_work(struct svm_range_list *svms, From 955220b04d42c41050158fec0f53957f320b96f9 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 6 Oct 2022 15:25:08 +0530 Subject: [PATCH 336/861] drm/amdgpu: Fix programming of initial XCP mode On initialization set the partition mode correctly to SPX (default) or any other user specified partition mode. Use switch_compute_partition API so that all settings are initialized correctly. 
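The shape of the change, pieced together from the two hunks below: early_init now only marks the mode as unresolved, and cp_resume settles it through the common switch path, so the per-mode num_xcc_per_xcp bookkeeping lives in one place:

        /* early_init: defer the decision */
        adev->gfx.partition_mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;

        /* cp_resume: apply SPX (default) or the user-requested mode */
        if (adev->gfx.partition_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
                gfx_v9_4_3_switch_compute_partition(adev, amdgpu_user_partt_mode);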
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 28 ++++++------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 73f652ad5b00..b6b7dbb62448 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1891,6 +1891,11 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) return r; } + if (adev->gfx.partition_mode == + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + gfx_v9_4_3_switch_compute_partition( + adev, amdgpu_user_partt_mode); + /* set the virtual and physical id based on partition_mode */ gfx_v9_4_3_program_xcc_id(adev, i); @@ -2112,28 +2117,7 @@ static int gfx_v9_4_3_early_init(void *handle) num_xcc = NUM_XCC(adev->gfx.xcc_mask); - adev->gfx.partition_mode = amdgpu_user_partt_mode; - /* calculate the num_xcc_in_xcp for the partition mode*/ - switch (amdgpu_user_partt_mode) { - case AMDGPU_SPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = num_xcc; - break; - case AMDGPU_DPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = num_xcc / 2; - break; - case AMDGPU_TPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = num_xcc / 3; - break; - case AMDGPU_QPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = num_xcc / 4; - break; - case AMDGPU_CPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = 1; - break; - default: - adev->gfx.num_xcc_per_xcp = num_xcc; - break; - } + adev->gfx.partition_mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); From 5a8b26a88639d69453d592ee11c03a24e0b62b9a Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 19 Sep 2022 18:08:25 +0530 Subject: [PATCH 337/861] drm/amdgpu: Fix register access on GC v9.4.3 In GC v9.4.3 there are multiple XCCs. It's required to use the physical instance number to get the right register offset. Use the GET_INST API for that. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index b6b7dbb62448..f1e6da4d62a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1600,7 +1600,7 @@ static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring, int xcc_id) int j; /* disable wptr polling */ - WREG32_FIELD15_PREREG(GC, xcc_id, CP_PQ_WPTR_POLL_CNTL, EN, 0); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo); @@ -1693,7 +1693,7 @@ static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring, int xcc_id) mqd->cp_hqd_active); if (ring->use_doorbell) - WREG32_FIELD15_PREREG(GC, xcc_id, CP_PQ_STATUS, DOORBELL_ENABLE, 1); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1); return 0; } From 6df442a03d1a839242397259fa13168858c52413 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 25 Apr 2022 22:19:58 +0800 Subject: [PATCH 338/861] drm/amdgpu: add new doorbell assignment table for aqua_vanjaram Four basic reasons for the change: 1. the number of rings grows a lot on aqua_vanjaram, and adjusting the old assignment cannot keep each ring in a continuous doorbell space. 2. 
the SDMA doorbell index should not exceed 0x1FF on aqua_vanjaram due to the regDOORBELLx_CTRL_ENTRY.BIF_DOORBELLx_RANGE_OFFSET_ENTRY field width. 3. re-designing the doorbell assignment and unifying the calculation as "start + ring/inst id" makes the code much more concise. 4. defining only the START/END values keeps the table simple v2: (Lijo) 1. replace name 2. use num_inst_per_aid/sdma_doorbell_range instead of hardcoding Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 52 +++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 594e303084c5..90f771423c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -78,7 +78,7 @@ amdgpu-y += \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \ - nbio_v7_9.o + nbio_v7_9.o aqua_vanjaram_reg_init.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c new file mode 100644 index 000000000000..3b97bc922d4a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -0,0 +1,52 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "soc15.h" + +#include "soc15_common.h" + +void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) +{ + int i; + + adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START; + + adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START; + + adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START; + adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END; + + adev->doorbell_index.sdma_doorbell_range = 20; + for (i = 0; i < adev->num_aid * adev->sdma.num_inst_per_aid; i++) + adev->doorbell_index.sdma_engine[i] = + AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START + + i * (adev->doorbell_index.sdma_doorbell_range >> 1); + + adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH; + adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START; + + adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP; + adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP; + + adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; +} From cab7d478da112e66f2ad8eec7dcfc0aa2a5babe1 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 29 Jun 2022 15:45:06 +0530 Subject: [PATCH 339/861] drm/amdgpu: Add IP instance map for aqua vanjaram Add XCC logical to physical instance map for aqua vanjaram v2: Keep look up table only for required IPs, for others return default mapping (Felix). Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 13 +++++++ .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 39 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + 3 files changed, 53 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index b58d94dc1924..915ae0bcdab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1951,6 +1951,17 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) return 0; } +static void amdgpu_discovery_ip_map_init(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 4, 3): + aqua_vanjaram_ip_map_init(adev); + break; + default: + break; + } +} + int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) { int r; @@ -2128,6 +2139,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; } + amdgpu_discovery_ip_map_init(adev); + switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 2, 1): diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 3b97bc922d4a..dbff8220fc19 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -50,3 +50,42 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } + +static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + int8_t inst) +{ + int8_t dev_inst; + + switch (block) { + case GC_HWIP: + dev_inst = adev->ip_map.dev_inst[block][inst]; + break; + default: + /* For rest of the IPs, no look up required. + * Assume 'logical instance == physical instance' for all configs. 
*/ + dev_inst = inst; + break; + } + + return dev_inst; +} + +void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) +{ + int xcc_mask; + int l, i; + + /* Map GC instances */ + l = 0; + xcc_mask = adev->gfx.xcc_mask; + while (xcc_mask) { + i = ffs(xcc_mask) - 1; + adev->ip_map.dev_inst[GC_HWIP][l++] = i; + xcc_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[GC_HWIP][l] = -1; + + adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; +} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 2b41ee968dd1..d8a2a6c2c6e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -111,6 +111,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev); int vega20_reg_base_init(struct amdgpu_device *adev); int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); +void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); From cf24f6a0d7137bc703a23187ffa4a65ed3f17820 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 29 Jun 2022 16:26:49 +0530 Subject: [PATCH 340/861] drm/amdgpu: Add mask for SDMA instances Add a mask of SDMA instances available for use. On certain ASIC configs, not all SDMA instances are available for software use. v2: Change sdma mask type to uint32_t (Le) Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index dbff8220fc19..5a1511a22367 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -37,7 +37,7 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END; adev->doorbell_index.sdma_doorbell_range = 20; - for (i = 0; i < adev->num_aid * adev->sdma.num_inst_per_aid; i++) + for (i = 0; i < adev->sdma.num_instances; i++) adev->doorbell_index.sdma_engine[i] = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START + i * (adev->doorbell_index.sdma_doorbell_range >> 1); From 5c606836eff823ea14c481ad6374bc6d87121182 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 29 Jun 2022 21:04:39 +0530 Subject: [PATCH 341/861] drm/amdgpu: Use SDMA instance table for aqua vanjaram For aqua vanjaram, add mapping for logical to physical instances. v2: Register accesses on bare metal should be based on physical instance. Use GET_INST() to get physical instance. 
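A worked example of the lookup table that the aqua_vanjaram hunk below builds (the mask value is hypothetical, not from the patch): with adev->sdma.sdma_mask == 0xd (binary 1101, physical instance 1 unusable), the ffs() walk packs logical ids onto the remaining physical instances:

        l = 0;
        sdma_mask = 0xd;                        /* assumed example value */
        while (sdma_mask) {
                i = ffs(sdma_mask) - 1;         /* lowest set bit */
                adev->ip_map.dev_inst[SDMA0_HWIP][l++] = i;
                sdma_mask &= ~(1 << i);
        }
        /* result: dev_inst[] = { 0, 2, 3, -1, ... }, so logical SDMA
         * instance 1 resolves to physical instance 2 via GET_INST() */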
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 5a1511a22367..12379c374457 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -59,6 +59,7 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, switch (block) { case GC_HWIP: + case SDMA0_HWIP: dev_inst = adev->ip_map.dev_inst[block][inst]; break; default: @@ -73,7 +74,7 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) { - int xcc_mask; + int xcc_mask, sdma_mask; int l, i; /* Map GC instances */ @@ -87,5 +88,15 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) for (; l < HWIP_MAX_INSTANCE; l++) adev->ip_map.dev_inst[GC_HWIP][l] = -1; + l = 0; + sdma_mask = adev->sdma.sdma_mask; + while (sdma_mask) { + i = ffs(sdma_mask) - 1; + adev->ip_map.dev_inst[SDMA0_HWIP][l++] = i; + sdma_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[SDMA0_HWIP][l] = -1; + adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; } From 1dfcdc30270a80ba5b45f922833c0c0e56d82576 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 24 May 2022 19:44:27 +0800 Subject: [PATCH 342/861] drm/amdgpu: switch to aqua_vanjaram_doorbell_index_init New doorbell index assignment is used by aqua_vanjaram. Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 24 +++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index b9bcb12bff91..29d2e08834c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -853,6 +853,28 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .query_video_codecs = &soc15_query_video_codecs, }; +static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = +{ + .read_disabled_bios = &soc15_read_disabled_bios, + .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, + .read_register = &soc15_read_register, + .reset = &soc15_asic_reset, + .reset_method = &soc15_asic_reset_method, + .set_vga_state = &soc15_vga_set_state, + .get_xclk = &soc15_get_xclk, + .set_uvd_clocks = &soc15_set_uvd_clocks, + .set_vce_clocks = &soc15_set_vce_clocks, + .get_config_memsize = &soc15_get_config_memsize, + .need_full_reset = &soc15_need_full_reset, + .init_doorbell_index = &aqua_vanjaram_doorbell_index_init, + .get_pcie_usage = &vega20_get_pcie_usage, + .need_reset_on_init = &soc15_need_reset_on_init, + .get_pcie_replay_count = &soc15_get_pcie_replay_count, + .supports_baco = &soc15_supports_baco, + .pre_asic_init = &soc15_pre_asic_init, + .query_video_codecs = &soc15_query_video_codecs, +}; + static int soc15_common_early_init(void *handle) { #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) @@ -1096,7 +1118,7 @@ static int soc15_common_early_init(void *handle) adev->external_rev_id = adev->rev_id + 0x3c; break; case IP_VERSION(9, 4, 3): - adev->asic_funcs = &vega20_asic_funcs; + adev->asic_funcs = &aqua_vanjaram_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; diff 
--git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index d8a2a6c2c6e3..9cc2dda087c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -115,4 +115,5 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); +void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev); #endif From fa9e78d14070c55a47dc092f6d18364680b83f44 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 31 Aug 2022 17:11:59 +0800 Subject: [PATCH 343/861] drm/amdgpu: update ip discovery header to v4 version 4 supports 64bit ip base address Signed-off-by: Le Ma Signed-off-by: Hawking Zhang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/discovery.h | 30 ++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h index f150404ffc68..9181e57887db 100644 --- a/drivers/gpu/drm/amd/include/discovery.h +++ b/drivers/gpu/drm/amd/include/discovery.h @@ -79,7 +79,14 @@ typedef struct ip_discovery_header uint32_t id; /* Table ID */ uint16_t num_dies; /* Number of Dies */ die_info die_info[16]; /* list die information for up to 16 dies */ - uint16_t padding[1]; /* padding */ + union { + uint16_t padding[1]; /* version <= 3 */ + struct { /* version == 4 */ + uint8_t base_addr_64_bit : 1; /* ip structures are using 64 bit base address */ + uint8_t reserved : 7; + uint8_t reserved2; + }; + }; } ip_discovery_header; typedef struct ip @@ -118,6 +125,26 @@ typedef struct ip_v3 uint32_t base_address[1]; /* Base Address list. Corresponds to the num_base_address field*/ } ip_v3; +typedef struct ip_v4 { + uint16_t hw_id; /* Hardware ID */ + uint8_t instance_number; /* Instance number for the IP */ + uint8_t num_base_address; /* Number of base addresses*/ + uint8_t major; /* Hardware ID.major version */ + uint8_t minor; /* Hardware ID.minor version */ + uint8_t revision; /* Hardware ID.revision version */ +#if defined(LITTLEENDIAN_CPU) + uint8_t sub_revision : 4; /* HCID Sub-Revision */ + uint8_t variant : 4; /* HW variant */ +#elif defined(BIGENDIAN_CPU) + uint8_t variant : 4; /* HW variant */ + uint8_t sub_revision : 4; /* HCID Sub-Revision */ +#endif + union { + uint32_t base_address[0]; /* 32-bit Base Address list. Corresponds to the num_base_address field*/ + uint64_t base_address_64[0]; /* 64-bit Base Address list. Corresponds to the num_base_address field*/ + } __packed; +} ip_v4; + typedef struct die_header { uint16_t die_id; @@ -134,6 +161,7 @@ typedef struct ip_structure { ip *ip_list; ip_v3 *ip_v3_list; + ip_v4 *ip_v4_list; }; /* IP list. Variable size*/ } die; } ip_structure; From c3729997a23e8955f017b6286bd6c73b386fbe49 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 6 Sep 2022 15:22:44 +0800 Subject: [PATCH 344/861] drm/amdgpu: increase DISCOVERY_TMR_SIZE New ip_discovery binary size is increased. 
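For clarity: (8 << 10) evaluates to 8192 bytes, doubling the reserved discovery TMR carve-out from the previous (4 << 10) = 4096 bytes; DISCOVERY_TMR_OFFSET (64 << 10) is left untouched.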
Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 63ec6924b907..3a2f347bd50d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,7 +24,7 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ -#define DISCOVERY_TMR_SIZE (4 << 10) +#define DISCOVERY_TMR_SIZE (8 << 10) #define DISCOVERY_TMR_OFFSET (64 << 10) void amdgpu_discovery_fini(struct amdgpu_device *adev); From 7e0eebdc4745b7e0f031df571621204fd9d6288e Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 14 Sep 2022 15:07:23 +0800 Subject: [PATCH 345/861] drm/amdgpu: extend max instances Number of instances is extended. Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 32112db5c841..e17a3697a23f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -661,7 +661,7 @@ enum amd_hw_ip_block_type { MAX_HWIP }; -#define HWIP_MAX_INSTANCE 28 +#define HWIP_MAX_INSTANCE 44 #define HW_ID_MAX 300 #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 1024a06359ca..6ea3f076257e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -32,7 +32,7 @@ #define AMDGPU_VCN_FIRMWARE_OFFSET 256 #define AMDGPU_VCN_MAX_ENC_RINGS 3 -#define AMDGPU_MAX_VCN_INSTANCES 2 +#define AMDGPU_MAX_VCN_INSTANCES 4 #define AMDGPU_MAX_VCN_ENC_RINGS AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0) From 3955b14134285f99e0763ba74b8b1b35faed37ed Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 20 Sep 2022 15:14:48 +0800 Subject: [PATCH 346/861] drm/amdgpu: do some register access cleanup in nbio v7_9 Use WREG_SOC15x() instead of WREG32(SOC15_REG_OFFSET()) Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 5dbab8ab9b27..b8e4afb8c791 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -98,7 +98,7 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan switch (dev_inst % adev->sdma.num_inst_per_aid) { case 0: - WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1) + + WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1, 4 * aid_id, doorbell_range); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, @@ -118,7 +118,7 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan doorbell_ctrl); break; case 1: - WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2) + + WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2, 4 * aid_id, doorbell_range); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, @@ -138,7 +138,7 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan doorbell_ctrl); break; case 2: - WREG32(SOC15_REG_OFFSET(NBIO, 0, 
regDOORBELL0_CTRL_ENTRY_3) + + WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3, 4 * aid_id, doorbell_range); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, @@ -157,7 +157,7 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan doorbell_ctrl); break; case 3: - WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4) + + WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4, 4 * aid_id, doorbell_range); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, @@ -219,7 +219,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4); - WREG32(SOC15_REG_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17) + + WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, aid_id, doorbell_range); WREG32_PCIE_EXT(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL) * 4 + AMDGPU_SMN_TARGET_AID(aid_id) @@ -233,7 +233,8 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_RANGE_SIZE, 0); - WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, doorbell_range); + WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, + aid_id, doorbell_range); WREG32(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL), doorbell_ctrl); } From aabb478421f5ca2be0f1343d02873394e935c582 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 7 Sep 2022 21:17:03 +0800 Subject: [PATCH 347/861] drm/amdgpu: upgrade amdgpu_discovery struct ip to ip_v4 version 4 supports 64bit ip base address Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 915ae0bcdab2..2b573675bc7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -471,11 +471,11 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev) adev->mman.discovery_bin = NULL; } -static int amdgpu_discovery_validate_ip(const struct ip *ip) +static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip) { - if (ip->number_instance >= HWIP_MAX_INSTANCE) { - DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n", - ip->number_instance); + if (ip->instance_number >= HWIP_MAX_INSTANCE) { + DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n", + ip->instance_number); return -EINVAL; } if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) { @@ -493,7 +493,7 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; - struct ip *ip; + struct ip_v4 *ip; uint16_t die_offset, ip_offset, num_dies, num_ips; int i, j; @@ -510,16 +510,16 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, ip_offset = die_offset + sizeof(*dhdr); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); if (amdgpu_discovery_validate_ip(ip)) goto next_ip; - if (le16_to_cpu(ip->harvest) == 1) { + if (le16_to_cpu(ip->variant) == 1) { switch (le16_to_cpu(ip->hw_id)) { case VCN_HWID: (*vcn_harvest_count)++; - if (ip->number_instance == 0) + if (ip->instance_number == 0) adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; else adev->vcn.harvest_config |= 
AMDGPU_VCN_HARVEST_VCN1; @@ -852,10 +852,10 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, size_t ip_offset = _ip_offset; for (jj = 0; jj < num_ips; jj++) { - struct ip *ip; + struct ip_v4 *ip; struct ip_hw_instance *ip_hw_instance; - ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); if (amdgpu_discovery_validate_ip(ip) || le16_to_cpu(ip->hw_id) != ii) goto next_ip; @@ -903,11 +903,11 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, return -ENOMEM; } ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */ - ip_hw_instance->num_instance = ip->number_instance; + ip_hw_instance->num_instance = ip->instance_number; ip_hw_instance->major = ip->major; ip_hw_instance->minor = ip->minor; ip_hw_instance->revision = ip->revision; - ip_hw_instance->harvest = ip->harvest; + ip_hw_instance->harvest = ip->variant; ip_hw_instance->num_base_addresses = ip->num_base_address; for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) @@ -1082,7 +1082,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; - struct ip *ip; + struct ip_v4 *ip; uint16_t die_offset; uint16_t ip_offset; uint16_t num_dies; @@ -1121,7 +1121,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) le16_to_cpu(dhdr->die_id), num_ips); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); if (amdgpu_discovery_validate_ip(ip)) goto next_ip; @@ -1131,7 +1131,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", hw_id_names[le16_to_cpu(ip->hw_id)], le16_to_cpu(ip->hw_id), - ip->number_instance, + ip->instance_number, ip->major, ip->minor, ip->revision); @@ -1182,7 +1182,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) { DRM_DEBUG("set register base offset for %s\n", hw_id_names[le16_to_cpu(ip->hw_id)]); - adev->reg_offset[hw_ip][ip->number_instance] = + adev->reg_offset[hw_ip][ip->instance_number] = ip->base_address; /* Instance support is somewhat inconsistent. * SDMA is a good example. Sienna cichlid has 4 total @@ -1193,7 +1193,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) * example. On most chips there are multiple instances * with the same HWID. */ - adev->ip_versions[hw_ip][ip->number_instance] = + adev->ip_versions[hw_ip][ip->instance_number] = IP_VERSION(ip->major, ip->minor, ip->revision); } } From 7d158f52bfbf82279ea8f1c3dc7a3ab3065a76e4 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 27 Sep 2022 17:26:27 +0800 Subject: [PATCH 348/861] drm/amdgpu: parse base address from new ip discovery with 64bit ip base address Truncate the 64bit base address from ip discovery and only store lower 32bit ip base in reg_offset[]. Bits > 32 follows ASIC specific format, thus just discard them and handle it within specific ASIC. By this way reg_offset[] and related helpers can stay unchanged. 
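As a minimal sketch of why the helpers can stay unchanged (following the usual SOC15 access pattern; regFOO and its base index are hypothetical placeholders, not registers from this patch):

    /* reg_offset[] still holds plain 32-bit dword offsets after the
     * truncation, so existing lookups keep working as before:
     */
    u32 reg = adev->reg_offset[GC_HWIP][0][regFOO_BASE_IDX] + regFOO;
    u32 val = RREG32(reg);

Any ASIC-specific upper bits (e.g. the AID routing bits introduced later in this series) are re-applied only by the helpers that actually need them.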
v2: make comments more generic Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 49 +++++++++++++++---- 1 file changed, 40 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 2b573675bc7e..71293db09009 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -532,7 +532,10 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, } } next_ip: - ip_offset += struct_size(ip, base_address, ip->num_base_address); + if (ihdr->base_addr_64_bit) + ip_offset += struct_size(ip, base_address_64, ip->num_base_address); + else + ip_offset += struct_size(ip, base_address, ip->num_base_address); } } } @@ -838,7 +841,8 @@ static void ip_disc_release(struct kobject *kobj) static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, struct ip_die_entry *ip_die_entry, - const size_t _ip_offset, const int num_ips) + const size_t _ip_offset, const int num_ips, + bool reg_base_64) { int ii, jj, kk, res; @@ -910,15 +914,25 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, ip_hw_instance->harvest = ip->variant; ip_hw_instance->num_base_addresses = ip->num_base_address; - for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) - ip_hw_instance->base_addr[kk] = ip->base_address[kk]; + for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) { + if (reg_base_64) + ip_hw_instance->base_addr[kk] = + lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF; + else + ip_hw_instance->base_addr[kk] = ip->base_address[kk]; + } kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype); ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset; res = kobject_add(&ip_hw_instance->kobj, NULL, "%d", ip_hw_instance->num_instance); next_ip: - ip_offset += struct_size(ip, base_address, ip->num_base_address); + if (reg_base_64) + ip_offset += struct_size(ip, base_address_64, + ip->num_base_address); + else + ip_offset += struct_size(ip, base_address, + ip->num_base_address); } } @@ -972,7 +986,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) return res; } - amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips); + amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit); } return 0; @@ -1174,12 +1188,26 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) * convert the endianness of base addresses in place, * so that we don't need to convert them when accessing adev->reg_offset. */ - ip->base_address[k] = le32_to_cpu(ip->base_address[k]); + if (ihdr->base_addr_64_bit) + /* Truncate the 64bit base address from ip discovery + * and only store lower 32bit ip base in reg_offset[]. + * Bits > 32 follows ASIC specific format, thus just + * discard them and handle it within specific ASIC. + * By this way reg_offset[] and related helpers can + * stay unchanged. + * The base address is in dwords, thus clear the + * highest 2 bits to store. 
+ */ + ip->base_address[k] = + lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF; + else + ip->base_address[k] = le32_to_cpu(ip->base_address[k]); DRM_DEBUG("\t0x%08x\n", ip->base_address[k]); } for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { - if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) { + if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) && + hw_id_map[hw_ip] != 0) { DRM_DEBUG("set register base offset for %s\n", hw_id_names[le16_to_cpu(ip->hw_id)]); adev->reg_offset[hw_ip][ip->instance_number] = @@ -1199,7 +1227,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) } next_ip: - ip_offset += struct_size(ip, base_address, ip->num_base_address); + if (ihdr->base_addr_64_bit) + ip_offset += struct_size(ip, base_address_64, ip->num_base_address); + else + ip_offset += struct_size(ip, base_address, ip->num_base_address); } } From 2fa480d36eb302712e48dce4d2f6564b24426be3 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 27 Sep 2022 17:51:33 +0800 Subject: [PATCH 349/861] drm/amdgpu: add helpers to access registers on different AIDs SMN address which is larger than 32bit has different indications through bit[34:32] on different AIDs. v2: put smn addressing of different AIDs into asic specific place v3: change to ext_id/ext_offset naming Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 19 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + drivers/gpu/drm/amd/amdgpu/soc15_common.h | 10 ++++++++++ 5 files changed, 33 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e17a3697a23f..4c4ce33c8359 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -574,6 +574,8 @@ struct amdgpu_asic_funcs { /* query video codecs */ int (*query_video_codecs)(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs); + /* encode "> 32bits" smn addressing */ + u64 (*encode_ext_smn_addressing)(int ext_id); }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 12379c374457..2616bdb40418 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -100,3 +100,22 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; } + +/* Fixed pattern for smn addressing on different AIDs: + * bit[34]: indicate cross AID access + * bit[33:32]: indicate target AID id + * AID id range is 0 ~ 3 as maximum AID number is 4. 
+ */ +u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id) +{ + u64 ext_offset; + + /* local routing and bit[34:32] will be zeros */ + if (ext_id == 0) + return 0; + + /* Initiated from host, accessing to all non-zero aids are cross traffic */ + ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34); + + return ext_offset; +} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 29d2e08834c7..206013d276c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -873,6 +873,7 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .supports_baco = &soc15_supports_baco, .pre_asic_init = &soc15_pre_asic_init, .query_video_codecs = &soc15_query_video_codecs, + .encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing, }; static int soc15_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 9cc2dda087c4..dd48db09aa51 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -112,6 +112,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev); int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); +u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 39e4406da4ae..1c9e924b5f8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -188,4 +188,14 @@ #define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \ __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP) +/* inst equals to ext for some IPs */ +#define RREG32_SOC15_EXT(ip, inst, reg, ext) \ + RREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * 4 \ + + adev->asic_funcs->encode_ext_smn_addressing(ext)) \ + +#define WREG32_SOC15_EXT(ip, inst, reg, ext, value) \ + WREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * 4 \ + + adev->asic_funcs->encode_ext_smn_addressing(ext), \ + value) \ + #endif From 369576c2d5a8e8c3d7efb9d598970ba3f006b07e Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 22 Sep 2022 15:46:54 +0800 Subject: [PATCH 350/861] drm/amdgpu: consolidate the access helpers in nbio v7_9 Use WREG32_SOC15_EXT to write registers with address larger than 32bit. Signed-off-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 45 +++++++------------------- 1 file changed, 12 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index b8e4afb8c791..78eab4d48e38 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -62,15 +62,6 @@ static u32 nbio_v7_9_get_memsize(struct amdgpu_device *adev) return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE); } -#define S2A_DOORBELL_REG_LSD_OFFSET 0x40 - -/* Temporarily add 2 macros below. Range is 0 ~ 3 as total AID number is 4. - * They will be obsoleted after the latest ip offset header - * is imported in driver in near future. 
- */ -#define AMDGPU_SMN_TARGET_AID(x) ((u64)(x) << 32) -#define AMDGPU_SMN_CROSS_AID (1ULL << 34) - static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instance, bool use_doorbell, int doorbell_index, int doorbell_size) { @@ -111,11 +102,8 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x1); - WREG32_PCIE_EXT((SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL) - + S2A_DOORBELL_REG_LSD_OFFSET) * 4 - + AMDGPU_SMN_TARGET_AID(aid_id) - + AMDGPU_SMN_CROSS_AID * !!aid_id, - doorbell_ctrl); + WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_1_CTRL, + aid_id, doorbell_ctrl); break; case 1: WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2, @@ -131,11 +119,8 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x2); - WREG32_PCIE_EXT((SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL) - + S2A_DOORBELL_REG_LSD_OFFSET) * 4 - + AMDGPU_SMN_TARGET_AID(aid_id) - + AMDGPU_SMN_CROSS_AID * !!aid_id, - doorbell_ctrl); + WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_2_CTRL, + aid_id, doorbell_ctrl); break; case 2: WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3, @@ -151,10 +136,8 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x8); - WREG32_PCIE_EXT(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL) * 4 - + AMDGPU_SMN_TARGET_AID(aid_id) - + AMDGPU_SMN_CROSS_AID * !!aid_id, - doorbell_ctrl); + WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_5_CTRL, + aid_id, doorbell_ctrl); break; case 3: WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4, @@ -170,10 +153,8 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x9); - WREG32_PCIE_EXT(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_6_CTRL) * 4 - + AMDGPU_SMN_TARGET_AID(aid_id) - + AMDGPU_SMN_CROSS_AID * !!aid_id, - doorbell_ctrl); + WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_6_CTRL, + aid_id, doorbell_ctrl); break; default: break; @@ -221,10 +202,8 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, aid_id, doorbell_range); - WREG32_PCIE_EXT(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL) * 4 - + AMDGPU_SMN_TARGET_AID(aid_id) - + AMDGPU_SMN_CROSS_AID * !!aid_id, - doorbell_ctrl); + WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_4_CTRL, + aid_id, doorbell_ctrl); } else { doorbell_range = REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, @@ -235,8 +214,8 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, aid_id, doorbell_range); - WREG32(SOC15_REG_OFFSET(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL), - doorbell_ctrl); + WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_4_CTRL, + aid_id, doorbell_ctrl); } } From 870d1e5afca58261a147e9080abb8cc75fccb849 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 14 Oct 2022 13:06:18 +0530 Subject: [PATCH 351/861] drm/amdgpu: Fix interrupt handling in GFX v9.4.3 IH follows a different identification scheme for its clients. Get the right mapping of xcc instance from IH node id. 
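The mapping reduces to counting the enabled XCCs at or below the IH node's position - a minimal sketch mirroring gfx_v9_4_3_ih_to_xcc_inst() in the diff below (the ih_node / 2 scaling assumes two IH node ids per XCC):

    /* hweight8() over the enabled-XCC mask, cut off at the node's
     * physical position, yields the logical XCC instance; a count of
     * zero means the node maps to no enabled XCC.
     */
    int xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
    return xcc ? xcc - 1 : -EINVAL;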
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 35 +++++++++++++++++++++---- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index f1e6da4d62a3..c5ffcd70ec7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2730,11 +2730,24 @@ static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, return 0; } +static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) +{ + int xcc; + + xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0)); + if (!xcc) { + dev_err(adev->dev, "Couldn't find xcc mapping from IH node"); + return -EINVAL; + } + + return xcc - 1; +} + static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - int i, phys_id; + int i, xcc_id; u8 me_id, pipe_id, queue_id; struct amdgpu_ring *ring; @@ -2743,14 +2756,19 @@ static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, pipe_id = (entry->ring_id & 0x03) >> 0; queue_id = (entry->ring_id & 0x70) >> 4; - phys_id = node_id_to_phys_map[entry->node_id]; + xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id); + + if (xcc_id == -EINVAL) + return -EINVAL; switch (me_id) { case 0: case 1: case 2: for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i + phys_id * adev->gfx.num_compute_rings]; + ring = &adev->gfx.compute_ring + [i + + xcc_id * adev->gfx.num_compute_rings]; /* Per-queue interrupt is supported for MEC starting from VI. * The interrupt can only be enabled/disabled per pipe instead of per queue. */ @@ -2768,18 +2786,25 @@ static void gfx_v9_4_3_fault(struct amdgpu_device *adev, { u8 me_id, pipe_id, queue_id; struct amdgpu_ring *ring; - int i; + int i, xcc_id; me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; queue_id = (entry->ring_id & 0x70) >> 4; + xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id); + + if (xcc_id == -EINVAL) + return; + switch (me_id) { case 0: case 1: case 2: for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; + ring = &adev->gfx.compute_ring + [i + + xcc_id * adev->gfx.num_compute_rings]; if (ring->me == me_id && ring->pipe == pipe_id && ring->queue == queue_id) drm_sched_fault(&ring->sched); From 57a83b2dafff055698190d3fe3d7197c969c2dcc Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 23 Oct 2022 20:27:59 +0800 Subject: [PATCH 352/861] drm/amdgpu: init gfx_v9_4_3 external_rev_id it is used for user space driver to identify gfx_v9_4_3 chip Signed-off-by: Hawking Zhang Acked-by: Alex Deucher Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 206013d276c1..06a18b2f6e04 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1127,6 +1127,7 @@ static int soc15_common_early_init(void *handle) AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; + adev->external_rev_id = adev->rev_id + 0x46; break; default: /* FIXME: not supported yet */ From 5f09237b82e51be8a1849eb658dcb153748f8f60 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 31 Oct 2022 10:46:05 +0530 Subject: [PATCH 353/861] drm/amdgpu: Add SDMA v4.4.2 golden settings Add programming of SDMA golden settings for 
v4.4.2 Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 4350939105c5..016813b295ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -96,11 +96,22 @@ static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id) static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->ip_versions[SDMA0_HWIP][0]) { - case IP_VERSION(4, 4, 2): - break; - default: - break; + u32 val; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG); + val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG, NUM_BANKS, 4); + val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG, + PIPE_INTERLEAVE_SIZE, 0); + WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG, val); + + val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ); + val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ, NUM_BANKS, + 4); + val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ, + PIPE_INTERLEAVE_SIZE, 0); + WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ, val); } } From 553f973a0d7bbe95ea5da46979d926a9c0ada109 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 11 Oct 2022 09:52:58 -0400 Subject: [PATCH 354/861] drm/amd/amdgpu: Update debugfs for XCC support (v3) This patch updates the 'regs2' interface for MMIO registers to add a new IOCTL command for a 'v2' state data that includes the XCC ID. This patch then updates amdgpu_gfx_select_se_sh() and amdgpu_gfx_select_me_pipe_q() (and the implementations in the gfx drivers) to support an additional parameter. This patch then creates a new debugfs interface "gprwave" which is a merge of shader GPR and wave status access. This new interface uses an IOCTL to select banks as well as XCC identity.
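A hedged sketch of how userspace might drive the new gprwave interface (the debugfs path is an assumption based on the "amdgpu_gprwave" name registered below; error handling omitted):

    /* Needs <fcntl.h>, <sys/ioctl.h>, <unistd.h>, <stdint.h> and the
     * struct/ioctl definitions from amdgpu_umr.h.
     */
    struct amdgpu_debugfs_gprwave_iocdata id = {
            .gpr_or_wave = 0, /* 0 selects wave status per the read handler */
            .se = 0, .sh = 0, .cu = 0, .simd = 0, .wave = 0,
            .xcc_id = 1,      /* target the second XCC */
    };
    int fd = open("/sys/kernel/debug/dri/0/amdgpu_gprwave", O_RDONLY);
    ioctl(fd, AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE, &id);
    uint32_t words[64];
    ssize_t n = read(fd, words, sizeof(words)); /* size and offset must be 4-byte multiples */
    close(fd);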
(v2) Fix missing xcc_id in wave_ind function (v3) Fix pm runtime calls and mutex locking (v4) Fix bad label Signed-off-by: Tom St Denis Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 200 ++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h | 36 +++- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 56 +++--- 10 files changed, 272 insertions(+), 72 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index df94cd2c4b39..e94d0cf3f793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -139,7 +139,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, sh_bank, instance_bank, 0); } else if (use_ring) { mutex_lock(&adev->srbm_mutex); - amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid); + amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0); } if (pm_pg_lock) @@ -172,7 +172,7 @@ end: amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } else if (use_ring) { - amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } @@ -263,14 +263,14 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, - rd->id.grbm.sh, - rd->id.grbm.instance, 0); + rd->id.grbm.sh, + rd->id.grbm.instance, rd->id.xcc_id); } if (rd->id.use_srbm) { mutex_lock(&adev->srbm_mutex); amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe, - rd->id.srbm.queue, rd->id.srbm.vmid); + rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id); } if (rd->id.pg_lock) @@ -296,12 +296,12 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } end: if (rd->id.use_grbm) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id); mutex_unlock(&adev->grbm_idx_mutex); } if (rd->id.use_srbm) { - amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id); mutex_unlock(&adev->srbm_mutex); } @@ -320,19 +320,45 @@ end: static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) { struct amdgpu_debugfs_regs2_data *rd = f->private_data; + struct amdgpu_debugfs_regs2_iocdata v1_data; int r; + mutex_lock(&rd->lock); + switch (cmd) { - case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: - mutex_lock(&rd->lock); - r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2: + r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data, sizeof(rd->id)); - mutex_unlock(&rd->lock); - return r ? 
-EINVAL : 0; + if (r) + r = -EINVAL; + goto done; + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: + r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data, + sizeof(v1_data)); + if (r) { + r = -EINVAL; + goto done; + } + goto v1_copy; default: - return -EINVAL; + r = -EINVAL; + goto done; } - return 0; + +v1_copy: + rd->id.use_srbm = v1_data.use_srbm; + rd->id.use_grbm = v1_data.use_grbm; + rd->id.pg_lock = v1_data.pg_lock; + rd->id.grbm.se = v1_data.grbm.se; + rd->id.grbm.sh = v1_data.grbm.sh; + rd->id.grbm.instance = v1_data.grbm.instance; + rd->id.srbm.me = v1_data.srbm.me; + rd->id.srbm.pipe = v1_data.srbm.pipe; + rd->id.srbm.queue = v1_data.srbm.queue; + rd->id.xcc_id = 0; +done: + mutex_unlock(&rd->lock); + return r; } static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos) @@ -345,6 +371,135 @@ static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1); } +static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_gprwave_data *rd; + + rd = kzalloc(sizeof *rd, GFP_KERNEL); + if (!rd) + return -ENOMEM; + rd->adev = file_inode(file)->i_private; + file->private_data = rd; + mutex_init(&rd->lock); + + return 0; +} + +static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_gprwave_data *rd = file->private_data; + mutex_destroy(&rd->lock); + kfree(file->private_data); + return 0; +} + +static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos) +{ + struct amdgpu_debugfs_gprwave_data *rd = f->private_data; + struct amdgpu_device *adev = rd->adev; + ssize_t result = 0; + int r; + uint32_t *data, x; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); + if (!data) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); + return -ENOMEM; + } + + /* switch to the specific se/sh/cu */ + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id); + + if (!rd->id.gpr_or_wave) { + x = 0; + if (adev->gfx.funcs->read_wave_data) + adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x); + } else { + x = size >> 2; + if (rd->id.gpr.vpgr_or_sgpr) { + if (adev->gfx.funcs->read_wave_vgprs) + adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data); + } else { + if (adev->gfx.funcs->read_wave_sgprs) + adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data); + } + } + + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + if (!x) { + result = -EINVAL; + goto done; + } + + while (size && (*pos < x * 4)) { + uint32_t value; + + value = data[*pos >> 2]; + r = put_user(value, (uint32_t *)buf); + if (r) { + result = r; + goto done; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + 
} + +done: + amdgpu_virt_disable_access_debugfs(adev); + kfree(data); + return result; +} + +static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data) +{ + struct amdgpu_debugfs_gprwave_data *rd = f->private_data; + int r; + + mutex_lock(&rd->lock); + + switch (cmd) { + case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE: + r = copy_from_user(&rd->id, (struct amdgpu_debugfs_gprwave_iocdata *)data, sizeof rd->id); + if (r) + return r ? -EINVAL : 0; + goto done; + default: + r = -EINVAL; + goto done; + } + +done: + mutex_unlock(&rd->lock); + return r; +} + + + /** * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register @@ -913,7 +1068,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, x = 0; if (adev->gfx.funcs->read_wave_data) - adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); + adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x); amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); @@ -1007,10 +1162,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, if (bank == 0) { if (adev->gfx.funcs->read_wave_vgprs) - adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data); + adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data); } else { if (adev->gfx.funcs->read_wave_sgprs) - adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); + adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data); } amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); @@ -1341,6 +1496,15 @@ static const struct file_operations amdgpu_debugfs_regs2_fops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_gprwave_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl, + .read = amdgpu_debugfs_gprwave_read, + .open = amdgpu_debugfs_gprwave_open, + .release = amdgpu_debugfs_gprwave_release, + .llseek = default_llseek +}; + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, @@ -1418,6 +1582,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = { static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, &amdgpu_debugfs_regs2_fops, + &amdgpu_debugfs_gprwave_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, @@ -1434,6 +1599,7 @@ static const struct file_operations *debugfs_regs[] = { static const char * const debugfs_regs_names[] = { "amdgpu_regs", "amdgpu_regs2", + "amdgpu_gprwave", "amdgpu_regs_didt", "amdgpu_regs_pcie", "amdgpu_regs_smc", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 93f9875154db..2b2d75763875 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -247,16 +247,16 @@ struct amdgpu_gfx_funcs { uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance, int xcc_id); - void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, + void (*read_wave_data)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); - void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, + void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, 
uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst); - void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, + void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst); void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, - u32 queue, u32 vmid); + u32 queue, u32 vmid, u32 xcc_id); void (*init_spm_golden)(struct amdgpu_device *adev); void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); int (*get_gfx_shadow_info)(struct amdgpu_device *adev, @@ -405,7 +405,7 @@ struct amdgpu_gfx { #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) -#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) +#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id))) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) #define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h index 919d9d401750..107f9bb0e24f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h @@ -35,17 +35,51 @@ struct amdgpu_debugfs_regs2_iocdata { } srbm; }; +struct amdgpu_debugfs_regs2_iocdata_v2 { + __u32 use_srbm, use_grbm, pg_lock; + struct { + __u32 se, sh, instance; + } grbm; + struct { + __u32 me, pipe, queue, vmid; + } srbm; + u32 xcc_id; +}; + +struct amdgpu_debugfs_gprwave_iocdata { + u32 gpr_or_wave, se, sh, cu, wave, simd, xcc_id; + struct { + u32 thread, vpgr_or_sgpr; + } gpr; +}; + /* * MMIO debugfs state data (per file* handle) */ struct amdgpu_debugfs_regs2_data { struct amdgpu_device *adev; struct mutex lock; - struct amdgpu_debugfs_regs2_iocdata id; + struct amdgpu_debugfs_regs2_iocdata_v2 id; +}; + +struct amdgpu_debugfs_gprwave_data { + struct amdgpu_device *adev; + struct mutex lock; + struct amdgpu_debugfs_gprwave_iocdata id; }; enum AMDGPU_DEBUGFS_REGS2_CMDS { AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0, + AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2, }; +enum AMDGPU_DEBUGFS_GPRWAVE_CMDS { + AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE=0, +}; + +//reg2 interface #define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata) +#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2 _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2, struct amdgpu_debugfs_regs2_iocdata_v2) + +//gprwave interface +#define AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE, struct amdgpu_debugfs_gprwave_iocdata) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 7b585141e10e..89158c72753e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4291,7 +4291,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA); } -static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t 
xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* in gfx10 the SIMD_ID is specified as part of the INSTANCE * field when performing a select_se_sh so it should be @@ -4318,7 +4318,7 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); } -static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -4329,7 +4329,7 @@ static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, dst); } -static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst) @@ -4340,7 +4340,7 @@ static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { nv_grbm_select(adev, me, pipe, q, vm); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 790df2cc3480..4b7224de879e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -765,7 +765,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); } -static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* in gfx11 the SIMD_ID is specified as part of the INSTANCE * field when performing a select_se_sh so it should be @@ -791,7 +791,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); } -static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -802,7 +802,7 @@ static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, dst); } -static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst) @@ -813,7 +813,7 @@ static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { soc21_grbm_select(adev, me, pipe, q, vm); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 7cb72bf1acdd..809558c718e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -2968,7 +2968,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, *(out++) = RREG32(mmSQ_IND_DATA); } -static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, 
int *no_fields) +static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 0 wave data */ dst[(*no_fields)++] = 0; @@ -2993,7 +2993,7 @@ static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -3003,7 +3003,7 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { DRM_INFO("Not implemented\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d56dda5fc588..0f0c12bbe228 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4112,7 +4112,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, *(out++) = RREG32(mmSQ_IND_DATA); } -static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 0 wave data */ dst[(*no_fields)++] = 0; @@ -4137,7 +4137,7 @@ static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -4147,7 +4147,7 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { cik_srbm_select(adev, me, pipe, q, vm); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 2ae7f167985f..6d0589dc1d6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3419,7 +3419,7 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, } static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { vi_srbm_select(adev, me, pipe, q, vm); } @@ -5217,7 +5217,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, *(out++) = RREG32(mmSQ_IND_DATA); } -static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 0 wave data */ dst[(*no_fields)++] = 0; @@ -5242,7 +5242,7 @@ static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device 
*adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index da69177dc76f..cc005e3bcd40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1788,7 +1788,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA); } -static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 1 wave data */ dst[(*no_fields)++] = 1; @@ -1809,7 +1809,7 @@ static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -1818,7 +1818,7 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, start + SQIND_WAVE_SGPRS_OFFSET, size, dst); } -static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst) @@ -1829,7 +1829,7 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { soc15_grbm_select(adev, me, pipe, q, vm, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c5ffcd70ec7b..76e3571ec5c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -536,21 +536,21 @@ static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data); } -static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) +static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address) { - WREG32_SOC15_RLC(GC, GET_INST(GC, 0), regSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (address << SQ_IND_INDEX__INDEX__SHIFT) | (SQ_IND_INDEX__FORCE_READ_MASK)); - return RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_IND_DATA); + return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA); } -static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, +static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t regno, uint32_t num, uint32_t *out) { - WREG32_SOC15_RLC(GC, GET_INST(GC, 0), regSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (regno << SQ_IND_INDEX__INDEX__SHIFT) | @@ -558,53 +558,53 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, (SQ_IND_INDEX__FORCE_READ_MASK) | 
(SQ_IND_INDEX__AUTO_INCR_MASK)); while (num--) - *(out++) = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_IND_DATA); + *(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA); } static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev, - uint32_t simd, uint32_t wave, + uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 1 wave data */ dst[(*no_fields)++] = 1; - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { - wave_read_regs(adev, simd, wave, 0, + wave_read_regs(adev, xcc_id, simd, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, dst); } -static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst) { - wave_read_regs(adev, simd, wave, thread, + wave_read_regs(adev, xcc_id, simd, wave, thread, start + 
SQIND_WAVE_VGPRS_OFFSET, size, dst); } static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { - soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, 0)); + soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id)); } static enum amdgpu_gfx_partition From ebadc1061e045e961339e0df7b8a07f3e589579c Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 7 Nov 2022 11:11:29 +0800 Subject: [PATCH 355/861] drm/amdgpu: retire render backend setup from gfx_v9_4_3 gfx v9_4_3 only support compute. render backend doesn't need to be involved in any compute shader execution. Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 44 ------------------------- 1 file changed, 44 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 76e3571ec5c8..6ed97371ff1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -888,46 +888,6 @@ static int gfx_v9_4_3_sw_fini(void *handle) return 0; } -static u32 gfx_v9_4_3_get_rb_active_bitmap(struct amdgpu_device *adev) -{ - u32 data, mask; - - data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_RB_BACKEND_DISABLE); - data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_RB_BACKEND_DISABLE); - - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; - - mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / - adev->gfx.config.max_sh_per_se); - - return (~data) & mask; -} - -static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev, int xcc_id) -{ - int i, j; - u32 data; - u32 active_rbs = 0; - u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / - adev->gfx.config.max_sh_per_se; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { - for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); - data = gfx_v9_4_3_get_rb_active_bitmap(adev); - active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * - rb_bitmap_width_per_sh); - } - } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); - mutex_unlock(&adev->grbm_idx_mutex); - - adev->gfx.config.backend_enable_mask = active_rbs; - adev->gfx.config.num_rbs = hweight32(active_rbs); -} - #define DEFAULT_SH_MEM_BASES (0x6000) static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev, int xcc_id) { @@ -991,10 +951,6 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) int i, j, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { - WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), GRBM_CNTL, READ_TIMEOUT, 0xff); - gfx_v9_4_3_setup_rb(adev, i); - } gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info); adev->gfx.config.db_debug2 = RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2); From 63121b11a95ccd30763e6def363f8fbe992b7a3f Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 24 Apr 2022 15:23:08 +0800 Subject: [PATCH 356/861] drm/amdgpu: add smuio v13_0_3 ip headers Add smuio v13_0_3 register offset and shift masks header files v2: update headers (Alex) Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- .../asic_reg/smuio/smuio_13_0_3_offset.h | 177 ++++++++ .../asic_reg/smuio/smuio_13_0_3_sh_mask.h | 428 ++++++++++++++++++ 2 files changed, 605 insertions(+) create 
mode 100644 drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h new file mode 100644 index 000000000000..b62b489402c5 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h @@ -0,0 +1,177 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _smuio_13_0_3_OFFSET_HEADER +#define _smuio_13_0_3_OFFSET_HEADER + + + +// addressBlock: aid_smuio_smuio_reset_SmuSmuioDec +// base address: 0x5a300 +#define regSMUIO_MP_RESET_INTR 0x00c1 +#define regSMUIO_MP_RESET_INTR_BASE_IDX 1 +#define regSMUIO_SOC_HALT 0x00c2 +#define regSMUIO_SOC_HALT_BASE_IDX 1 + + +// addressBlock: aid_smuio_smuio_tsc_SmuSmuioDec +// base address: 0x5a8a0 +#define regPWROK_REFCLK_GAP_CYCLES 0x0028 +#define regPWROK_REFCLK_GAP_CYCLES_BASE_IDX 2 +#define regGOLDEN_TSC_INCREMENT_UPPER 0x002b +#define regGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 2 +#define regGOLDEN_TSC_INCREMENT_LOWER 0x002c +#define regGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 2 +#define regGOLDEN_TSC_COUNT_UPPER 0x002d +#define regGOLDEN_TSC_COUNT_UPPER_BASE_IDX 2 +#define regGOLDEN_TSC_COUNT_LOWER 0x002e +#define regGOLDEN_TSC_COUNT_LOWER_BASE_IDX 2 +#define regSOC_GOLDEN_TSC_SHADOW_UPPER 0x002f +#define regSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 2 +#define regSOC_GOLDEN_TSC_SHADOW_LOWER 0x0030 +#define regSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 2 +#define regSOC_GAP_PWROK 0x0031 +#define regSOC_GAP_PWROK_BASE_IDX 2 + + +// addressBlock: aid_smuio_smuio_swtimer_SmuSmuioDec +// base address: 0x5ac70 +#define regPWR_VIRT_RESET_REQ 0x011c +#define regPWR_VIRT_RESET_REQ_BASE_IDX 2 +#define regPWR_DISP_TIMER_CONTROL 0x011d +#define regPWR_DISP_TIMER_CONTROL_BASE_IDX 2 +#define regPWR_DISP_TIMER_DEBUG 0x011e +#define regPWR_DISP_TIMER_DEBUG_BASE_IDX 2 +#define regPWR_DISP_TIMER2_CONTROL 0x011f +#define regPWR_DISP_TIMER2_CONTROL_BASE_IDX 2 +#define regPWR_DISP_TIMER2_DEBUG 0x0120 +#define regPWR_DISP_TIMER2_DEBUG_BASE_IDX 2 +#define regPWR_DISP_TIMER_GLOBAL_CONTROL 0x0121 +#define regPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 2 +#define regPWR_IH_CONTROL 0x0122 +#define regPWR_IH_CONTROL_BASE_IDX 2 + + +// addressBlock: aid_smuio_smuio_misc_SmuSmuioDec +// base address: 0x5a000 +#define regSMUIO_MCM_CONFIG 0x0023 +#define 
regSMUIO_MCM_CONFIG_BASE_IDX 1 +#define regIP_DISCOVERY_VERSION 0x0000 +#define regIP_DISCOVERY_VERSION_BASE_IDX 2 +#define regSCRATCH_REGISTER0 0x01bd +#define regSCRATCH_REGISTER0_BASE_IDX 2 +#define regSCRATCH_REGISTER1 0x01be +#define regSCRATCH_REGISTER1_BASE_IDX 2 +#define regSCRATCH_REGISTER2 0x01bf +#define regSCRATCH_REGISTER2_BASE_IDX 2 +#define regSCRATCH_REGISTER3 0x01c0 +#define regSCRATCH_REGISTER3_BASE_IDX 2 +#define regSCRATCH_REGISTER4 0x01c1 +#define regSCRATCH_REGISTER4_BASE_IDX 2 +#define regSCRATCH_REGISTER5 0x01c2 +#define regSCRATCH_REGISTER5_BASE_IDX 2 +#define regSCRATCH_REGISTER6 0x01c3 +#define regSCRATCH_REGISTER6_BASE_IDX 2 +#define regSCRATCH_REGISTER7 0x01c4 +#define regSCRATCH_REGISTER7_BASE_IDX 2 + + +// addressBlock: aid_smuio_smuio_gpio_SmuSmuioDec +// base address: 0x5a500 +#define regSMU_GPIOPAD_SW_INT_STAT 0x0140 +#define regSMU_GPIOPAD_SW_INT_STAT_BASE_IDX 1 +#define regSMU_GPIOPAD_MASK 0x0141 +#define regSMU_GPIOPAD_MASK_BASE_IDX 1 +#define regSMU_GPIOPAD_A 0x0142 +#define regSMU_GPIOPAD_A_BASE_IDX 1 +#define regSMU_GPIOPAD_TXIMPSEL 0x0143 +#define regSMU_GPIOPAD_TXIMPSEL_BASE_IDX 1 +#define regSMU_GPIOPAD_EN 0x0144 +#define regSMU_GPIOPAD_EN_BASE_IDX 1 +#define regSMU_GPIOPAD_Y 0x0145 +#define regSMU_GPIOPAD_Y_BASE_IDX 1 +#define regSMU_GPIOPAD_RXEN 0x0146 +#define regSMU_GPIOPAD_RXEN_BASE_IDX 1 +#define regSMU_GPIOPAD_RCVR_SEL0 0x0147 +#define regSMU_GPIOPAD_RCVR_SEL0_BASE_IDX 1 +#define regSMU_GPIOPAD_RCVR_SEL1 0x0148 +#define regSMU_GPIOPAD_RCVR_SEL1_BASE_IDX 1 +#define regSMU_GPIOPAD_PU_EN 0x0149 +#define regSMU_GPIOPAD_PU_EN_BASE_IDX 1 +#define regSMU_GPIOPAD_PD_EN 0x014a +#define regSMU_GPIOPAD_PD_EN_BASE_IDX 1 +#define regSMU_GPIOPAD_PINSTRAPS 0x014b +#define regSMU_GPIOPAD_PINSTRAPS_BASE_IDX 1 +#define regDFT_PINSTRAPS 0x014c +#define regDFT_PINSTRAPS_BASE_IDX 1 +#define regSMU_GPIOPAD_INT_STAT_EN 0x014d +#define regSMU_GPIOPAD_INT_STAT_EN_BASE_IDX 1 +#define regSMU_GPIOPAD_INT_STAT 0x014e +#define regSMU_GPIOPAD_INT_STAT_BASE_IDX 1 +#define regSMU_GPIOPAD_INT_STAT_AK 0x014f +#define regSMU_GPIOPAD_INT_STAT_AK_BASE_IDX 1 +#define regSMU_GPIOPAD_INT_EN 0x0150 +#define regSMU_GPIOPAD_INT_EN_BASE_IDX 1 +#define regSMU_GPIOPAD_INT_TYPE 0x0151 +#define regSMU_GPIOPAD_INT_TYPE_BASE_IDX 1 +#define regSMU_GPIOPAD_INT_POLARITY 0x0152 +#define regSMU_GPIOPAD_INT_POLARITY_BASE_IDX 1 +#define regSMUIO_PCC_GPIO_SELECT 0x0155 +#define regSMUIO_PCC_GPIO_SELECT_BASE_IDX 1 +#define regSMU_GPIOPAD_S0 0x0156 +#define regSMU_GPIOPAD_S0_BASE_IDX 1 +#define regSMU_GPIOPAD_S1 0x0157 +#define regSMU_GPIOPAD_S1_BASE_IDX 1 +#define regSMU_GPIOPAD_SCHMEN 0x0158 +#define regSMU_GPIOPAD_SCHMEN_BASE_IDX 1 +#define regSMU_GPIOPAD_SCL_EN 0x0159 +#define regSMU_GPIOPAD_SCL_EN_BASE_IDX 1 +#define regSMU_GPIOPAD_SDA_EN 0x015a +#define regSMU_GPIOPAD_SDA_EN_BASE_IDX 1 +#define regSMUIO_GPIO_INT0_SELECT 0x015b +#define regSMUIO_GPIO_INT0_SELECT_BASE_IDX 1 +#define regSMUIO_GPIO_INT1_SELECT 0x015c +#define regSMUIO_GPIO_INT1_SELECT_BASE_IDX 1 +#define regSMUIO_GPIO_INT2_SELECT 0x015d +#define regSMUIO_GPIO_INT2_SELECT_BASE_IDX 1 +#define regSMUIO_GPIO_INT3_SELECT 0x015e +#define regSMUIO_GPIO_INT3_SELECT_BASE_IDX 1 +#define regSMU_GPIOPAD_MP_INT0_STAT 0x015f +#define regSMU_GPIOPAD_MP_INT0_STAT_BASE_IDX 1 +#define regSMU_GPIOPAD_MP_INT1_STAT 0x0160 +#define regSMU_GPIOPAD_MP_INT1_STAT_BASE_IDX 1 +#define regSMU_GPIOPAD_MP_INT2_STAT 0x0161 +#define regSMU_GPIOPAD_MP_INT2_STAT_BASE_IDX 1 +#define regSMU_GPIOPAD_MP_INT3_STAT 0x0162 +#define regSMU_GPIOPAD_MP_INT3_STAT_BASE_IDX 1 
+#define regSMIO_INDEX 0x0163 +#define regSMIO_INDEX_BASE_IDX 1 +#define regS0_VID_SMIO_CNTL 0x0164 +#define regS0_VID_SMIO_CNTL_BASE_IDX 1 +#define regS1_VID_SMIO_CNTL 0x0165 +#define regS1_VID_SMIO_CNTL_BASE_IDX 1 +#define regOPEN_DRAIN_SELECT 0x0166 +#define regOPEN_DRAIN_SELECT_BASE_IDX 1 +#define regSMIO_ENABLE 0x0167 +#define regSMIO_ENABLE_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h new file mode 100644 index 000000000000..be896f3089fe --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h @@ -0,0 +1,428 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _smuio_13_0_3_SH_MASK_HEADER +#define _smuio_13_0_3_SH_MASK_HEADER + + +// addressBlock: aid_smuio_smuio_reset_SmuSmuioDec +//SMUIO_MP_RESET_INTR +#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0 +#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L +//SMUIO_SOC_HALT +#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2 +#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3 +#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L +#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L + + +// addressBlock: aid_smuio_smuio_tsc_SmuSmuioDec +//PWROK_REFCLK_GAP_CYCLES +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L +//GOLDEN_TSC_INCREMENT_UPPER +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_INCREMENT_LOWER +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL +//GOLDEN_TSC_COUNT_UPPER +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_COUNT_LOWER +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL +//SOC_GOLDEN_TSC_SHADOW_UPPER +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL +//SOC_GOLDEN_TSC_SHADOW_LOWER +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL +//SOC_GAP_PWROK +#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0 +#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L + + +// addressBlock: aid_smuio_smuio_swtimer_SmuSmuioDec +//PWR_VIRT_RESET_REQ +#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0 +#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f +#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL +#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L +//PWR_DISP_TIMER_CONTROL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER_DEBUG +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define 
PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x00000004L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L +//PWR_DISP_TIMER2_CONTROL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER2_DEBUG +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x00000004L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L +//PWR_DISP_TIMER_GLOBAL_CONTROL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0 +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L +//PWR_IH_CONTROL +#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0 +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5 +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6 +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f +#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L + + +// addressBlock: aid_smuio_smuio_misc_SmuSmuioDec +//SMUIO_MCM_CONFIG +#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0 +#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2 +#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x8 +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0xc +#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10 +#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11 +#define SMUIO_MCM_CONFIG__TOPOLOGY_ID__SHIFT 0x12 +#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L +#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000003CL +#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000F00L +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x00001000L +#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L +#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L +#define SMUIO_MCM_CONFIG__TOPOLOGY_ID_MASK 0x007C0000L 
+//IP_DISCOVERY_VERSION +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0 +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER0 +#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0 +#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER1 +#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0 +#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER2 +#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0 +#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER3 +#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0 +#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER4 +#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0 +#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER5 +#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0 +#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER6 +#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0 +#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER7 +#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0 +#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL + + +// addressBlock: aid_smuio_smuio_gpio_SmuSmuioDec +//SMU_GPIOPAD_SW_INT_STAT +#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L +//SMU_GPIOPAD_MASK +#define SMU_GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0 +#define SMU_GPIOPAD_MASK__GPIO_MASK_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_A +#define SMU_GPIOPAD_A__GPIO_A__SHIFT 0x0 +#define SMU_GPIOPAD_A__GPIO_A_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_TXIMPSEL +#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL__SHIFT 0x0 +#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_EN +#define SMU_GPIOPAD_EN__GPIO_EN__SHIFT 0x0 +#define SMU_GPIOPAD_EN__GPIO_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_Y +#define SMU_GPIOPAD_Y__GPIO_Y__SHIFT 0x0 +#define SMU_GPIOPAD_Y__GPIO_Y_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RXEN +#define SMU_GPIOPAD_RXEN__GPIO_RXEN__SHIFT 0x0 +#define SMU_GPIOPAD_RXEN__GPIO_RXEN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RCVR_SEL0 +#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0__SHIFT 0x0 +#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RCVR_SEL1 +#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1__SHIFT 0x0 +#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PU_EN +#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0 +#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PD_EN +#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0 +#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PINSTRAPS +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd +#define 
SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L +//DFT_PINSTRAPS +#define DFT_PINSTRAPS__DFT_PINSTRAPS__SHIFT 0x0 +#define DFT_PINSTRAPS__DFT_PINSTRAPS_MASK 0x000003FFL +//SMU_GPIOPAD_INT_STAT_EN +#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L +//SMU_GPIOPAD_INT_STAT +#define 
SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L +//SMU_GPIOPAD_INT_STAT_AK +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c +#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L +#define 
SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L +#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L +//SMU_GPIOPAD_INT_EN +#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0 +#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f +#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L +//SMU_GPIOPAD_INT_TYPE +#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0 +#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f +#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L +//SMU_GPIOPAD_INT_POLARITY +#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0 +#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f +#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L +//SMUIO_PCC_GPIO_SELECT +#define SMUIO_PCC_GPIO_SELECT__GPIO__SHIFT 0x0 +#define SMUIO_PCC_GPIO_SELECT__GPIO_MASK 0xFFFFFFFFL +//SMU_GPIOPAD_S0 +#define SMU_GPIOPAD_S0__GPIO_S0__SHIFT 0x0 +#define SMU_GPIOPAD_S0__GPIO_S0_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_S1 +#define SMU_GPIOPAD_S1__GPIO_S1__SHIFT 0x0 +#define SMU_GPIOPAD_S1__GPIO_S1_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SCHMEN +#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN__SHIFT 0x0 +#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SCL_EN +#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN__SHIFT 0x0 +#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SDA_EN +#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN__SHIFT 0x0 +#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN_MASK 0x7FFFFFFFL +//SMUIO_GPIO_INT0_SELECT +#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT1_SELECT +#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT2_SELECT +#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT3_SELECT +#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT_MASK 0xFFFFFFFFL +//SMU_GPIOPAD_MP_INT0_STAT +#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT_MASK 0x1FFFFFFFL +//SMU_GPIOPAD_MP_INT1_STAT +#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT_MASK 0x1FFFFFFFL +//SMU_GPIOPAD_MP_INT2_STAT +#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT_MASK 0x1FFFFFFFL 
+//SMU_GPIOPAD_MP_INT3_STAT +#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT_MASK 0x1FFFFFFFL +//SMIO_INDEX +#define SMIO_INDEX__SW_SMIO_INDEX__SHIFT 0x0 +#define SMIO_INDEX__SW_SMIO_INDEX_MASK 0x00000001L +//S0_VID_SMIO_CNTL +#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES__SHIFT 0x0 +#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES_MASK 0xFFFFFFFFL +//S1_VID_SMIO_CNTL +#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES__SHIFT 0x0 +#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES_MASK 0xFFFFFFFFL +//OPEN_DRAIN_SELECT +#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT__SHIFT 0x0 +#define OPEN_DRAIN_SELECT__RESERVED__SHIFT 0x1f +#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT_MASK 0x7FFFFFFFL +#define OPEN_DRAIN_SELECT__RESERVED_MASK 0x80000000L +//SMIO_ENABLE +#define SMIO_ENABLE__SMIO_ENABLE__SHIFT 0x0 +#define SMIO_ENABLE__SMIO_ENABLE_MASK 0xFFFFFFFFL + +#endif From 3d2ea552b229495050316e84d7cb0257cb3cd13b Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Tue, 12 Apr 2022 23:33:20 -0400 Subject: [PATCH 357/861] drm/amdgpu: implement smuio v13_0_3 callbacks Add smuio v13_0_3 callbacks for SMUIO. Tested-by: Ori Messinger Acked-by: Alex Deucher Reviewed-by: Lijo Lazar Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 5 + drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h | 1 + drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c | 103 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h | 28 ++++++ 5 files changed, 138 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c create mode 100644 drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 90f771423c94..8418a90df493 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -205,6 +205,7 @@ amdgpu-y += \ smuio_v11_0.o \ smuio_v11_0_6.o \ smuio_v13_0.o \ + smuio_v13_0_3.o \ smuio_v13_0_6.o # add reset block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 2b2d75763875..789115f5505f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -66,6 +66,11 @@ enum amdgpu_gfx_partition { #define NUM_XCC(x) hweight16(x) +enum amdgpu_pkg_type { + AMDGPU_PKG_TYPE_APU = 2, + AMDGPU_PKG_TYPE_UNKNOWN, +}; + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h index c7a823f3f2c5..89c38d864471 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h @@ -30,6 +30,7 @@ struct amdgpu_smuio_funcs { void (*get_clock_gating_state)(struct amdgpu_device *adev, u64 *flags); u32 (*get_die_id)(struct amdgpu_device *adev); u32 (*get_socket_id)(struct amdgpu_device *adev); + enum amdgpu_pkg_type (*get_pkg_type)(struct amdgpu_device *adev); bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev); }; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c new file mode 100644 index 000000000000..4368a5891eeb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c @@ -0,0 +1,103 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "smuio_v13_0_3.h" +#include "soc15_common.h" +#include "smuio/smuio_13_0_3_offset.h" +#include "smuio/smuio_13_0_3_sh_mask.h" + +#define PKG_TYPE_MASK 0x00000003L + +/** + * smuio_v13_0_3_get_die_id - query die id from FCH. + * + * @adev: amdgpu device pointer + * + * Returns die id + */ +static u32 smuio_v13_0_3_get_die_id(struct amdgpu_device *adev) +{ + u32 data, die_id; + + data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG); + die_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, DIE_ID); + + return die_id; +} + +/** + * smuio_v13_0_3_get_socket_id - query socket id from FCH + * + * @adev: amdgpu device pointer + * + * Returns socket id + */ +static u32 smuio_v13_0_3_get_socket_id(struct amdgpu_device *adev) +{ + u32 data, socket_id; + + data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG); + socket_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, SOCKET_ID); + + return socket_id; +} + +/** + * smuio_v13_0_3_get_pkg_type - query package type set by MP1/bootcode + * + * @adev: amdgpu device pointer + * + * Returns package type + */ + +static enum amdgpu_pkg_type smuio_v13_0_3_get_pkg_type(struct amdgpu_device *adev) +{ + enum amdgpu_pkg_type pkg_type; + u32 data; + + data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG); + data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, PKG_TYPE); + /* pkg_type[4:0] + * + * bit 1 == 1 APU form factor + * + * b0100 - b1111 - Reserved + */ + switch (data & PKG_TYPE_MASK) { + case 0x2: + pkg_type = AMDGPU_PKG_TYPE_APU; + break; + default: + pkg_type = AMDGPU_PKG_TYPE_UNKNOWN; + break; + } + + return pkg_type; +} + + +const struct amdgpu_smuio_funcs smuio_v13_0_3_funcs = { + .get_die_id = smuio_v13_0_3_get_die_id, + .get_socket_id = smuio_v13_0_3_get_socket_id, + .get_pkg_type = smuio_v13_0_3_get_pkg_type, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h new file mode 100644 index 000000000000..aec35f7efb58 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h @@ -0,0 +1,28 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMUIO_V13_0_3_H__ +#define __SMUIO_V13_0_3_H__ + +extern const struct amdgpu_smuio_funcs smuio_v13_0_3_funcs; + +#endif /* __SMUIO_V13_0_3_H__ */ From 6b7ec18b045ff524eab94340e18feefe9a783d2e Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 23 Nov 2021 22:27:17 +0800 Subject: [PATCH 358/861] drm/amdgpu: init smuio funcs for smuio v13_0_3 Add callbacks for SMUIO 13.0.3 Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 71293db09009..b602df4b445a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -82,6 +82,7 @@ #include "smuio_v11_0.h" #include "smuio_v11_0_6.h" #include "smuio_v13_0.h" +#include "smuio_v13_0_3.h" #include "smuio_v13_0_6.h" #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" @@ -2382,6 +2383,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 2): adev->smuio.funcs = &smuio_v13_0_funcs; break; + case IP_VERSION(13, 0, 3): + adev->smuio.funcs = &smuio_v13_0_3_funcs; + break; case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 8): adev->smuio.funcs = &smuio_v13_0_6_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h index aec35f7efb58..795f66c5a58b 100644 --- a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h @@ -23,6 +23,8 @@ #ifndef __SMUIO_V13_0_3_H__ #define __SMUIO_V13_0_3_H__ +#include "soc15_common.h" + extern const struct amdgpu_smuio_funcs smuio_v13_0_3_funcs; #endif /* __SMUIO_V13_0_3_H__ */ From ea2d2f8ececdd4c778b66e19b82ce278dfc5e1c4 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Tue, 5 Apr 2022 13:00:13 -0400 Subject: [PATCH 359/861] drm/amdgpu: detect current GPU memory partition mode - Add helpers to detect the current GPU memory partition. - Add current memory partition mode sysfs node. 
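As a quick illustration (not part of this patch), the new sysfs node can be
read from userspace once the driver registers it. A minimal sketch, assuming
the usual DRM sysfs layout and card0 (the card index varies per system):

    /* Read the current memory partition mode for card0. */
    #include <stdio.h>

    int main(void)
    {
            char buf[16];
            FILE *f = fopen("/sys/class/drm/card0/device/current_memory_partition", "r");

            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    printf("memory partition mode: %s", buf); /* e.g. "NPS1" */
            fclose(f);
            return 0;
    }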
Tested-by: Ori Messinger Reviewed-by: Felix Kuehling Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 25 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 12 ++++++++++++ 5 files changed, 60 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 2f6d85090b55..c2e92664031b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1200,6 +1200,24 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, return sysfs_emit(buf, "%s\n", partition_mode); } +static ssize_t amdgpu_gfx_get_current_memory_partition(struct device *dev, + struct device_attribute *addr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_memory_partition mode; + static const char *partition_modes[] = { + "UNKNOWN", "NPS1", "NPS2", "NPS4", "NPS8" + }; + BUILD_BUG_ON(ARRAY_SIZE(partition_modes) <= AMDGPU_NPS8_PARTITION_MODE); + + mode = min((int)adev->gfx.funcs->query_mem_partition_mode(adev), + AMDGPU_NPS8_PARTITION_MODE); + + return sysfs_emit(buf, "%s\n", partition_modes[mode]); +} + static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, struct device_attribute *addr, const char *buf, size_t count) @@ -1307,6 +1325,9 @@ static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR, static DEVICE_ATTR(available_compute_partition, S_IRUGO, amdgpu_gfx_get_available_compute_partition, NULL); +static DEVICE_ATTR(current_memory_partition, S_IRUGO, + amdgpu_gfx_get_current_memory_partition, NULL); + int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) { int r; @@ -1319,5 +1340,9 @@ int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) if (r) return r; + r = device_create_file(adev->dev, &dev_attr_current_memory_partition); + if (r) + return r; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 789115f5505f..e37501685fa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -71,6 +71,14 @@ enum amdgpu_pkg_type { AMDGPU_PKG_TYPE_UNKNOWN, }; +enum amdgpu_memory_partition { + UNKNOWN_MEMORY_PARTITION_MODE = 0, + AMDGPU_NPS1_PARTITION_MODE = 1, + AMDGPU_NPS2_PARTITION_MODE = 2, + AMDGPU_NPS4_PARTITION_MODE = 3, + AMDGPU_NPS8_PARTITION_MODE = 4, +}; + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -268,6 +276,8 @@ struct amdgpu_gfx_funcs { struct amdgpu_gfx_shadow_info *shadow_info); enum amdgpu_gfx_partition (*query_partition_mode)(struct amdgpu_device *adev); + enum amdgpu_memory_partition + (*query_mem_partition_mode)(struct amdgpu_device *adev); int (*switch_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); }; @@ -404,6 +414,7 @@ struct amdgpu_gfx { enum amdgpu_gfx_partition partition_mode; uint16_t xcc_mask; + enum amdgpu_memory_partition mem_partition_mode; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 8fa3a1f3b181..639b86c4d664 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -97,6 +97,7 @@ struct amdgpu_nbio_funcs { void 
(*clear_doorbell_interrupt)(struct amdgpu_device *adev);
 	u32 (*get_rom_offset)(struct amdgpu_device *adev);
 	u32 (*get_compute_partition_mode)(struct amdgpu_device *adev);
+	u32 (*get_memory_partition_mode)(struct amdgpu_device *adev);
 	void (*set_compute_partition_mode)(struct amdgpu_device *adev,
 					   enum amdgpu_gfx_partition mode);
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 6ed97371ff1e..3c19c5a0069d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -606,6 +606,16 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
 {
 	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
 }
+static enum amdgpu_memory_partition
+gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev)
+{
+	enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+	if (adev->nbio.funcs->get_memory_partition_mode)
+		mode = adev->nbio.funcs->get_memory_partition_mode(adev);
+
+	return mode;
+}
 
 static enum amdgpu_gfx_partition
 gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev)
@@ -675,6 +685,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
 	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
 	.query_partition_mode = &gfx_v9_4_3_query_compute_partition,
 	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
+	.query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition,
 };
 
 static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 78eab4d48e38..fa4b423c399b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -30,6 +30,8 @@
 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
 #include
 
+#define NPS_MODE_MASK 0x000000FFL
+
 static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
 {
 	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -406,6 +408,15 @@ static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev,
 	WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS, tmp);
 }
 
+static enum amdgpu_memory_partition nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev)
+{
+	u32 tmp;
+	tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
+	tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE);
+
+	return ffs(tmp);
+}
+
 const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
 	.get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
@@ -428,4 +439,5 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
 	.remap_hdp_registers = nbio_v7_9_remap_hdp_registers,
 	.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
 	.set_compute_partition_mode = nbio_v7_9_set_compute_partition_mode,
+	.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
 };

From d55391c2138c1a5bcd1316ccedf1835dd067d568 Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Wed, 9 Nov 2022 19:47:38 +0530
Subject: [PATCH 360/861] drm/amdgpu: Revert programming of CP_PSP_XCP_CTL

Programming of this register is taken care of by PSP. Incorrect
programming causes CP not to detect its XCC.
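One detail worth spelling out in the nbio_v7_9_get_memory_partition_mode()
hunk of patch 359 above: NPS_MODE is treated as a one-hot field, and ffs()
turns the set bit position into the amdgpu_memory_partition enum value
(1 = NPS1 ... 4 = NPS8, with 0 decoding to unknown). A toy check of that
decode (illustrative only, not driver code):

    #include <stdio.h>
    #include <strings.h> /* ffs() */

    int main(void)
    {
            /* one-hot NPS_MODE values as read from the status register */
            unsigned int nps_mode[] = { 0x0, 0x1, 0x2, 0x4, 0x8 };

            for (int i = 0; i < 5; i++)
                    printf("NPS_MODE=0x%x -> enum %d\n",
                           nps_mode[i], ffs((int)nps_mode[i]));
            return 0;
    }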
Signed-off-by: Lijo Lazar Reported-by: Alexander Turek Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 3c19c5a0069d..9d7852ffd708 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1055,9 +1055,6 @@ static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP)); WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, tmp); - tmp = xcc_id << REG_FIELD_SHIFT(CP_PSP_XCP_CTL, PHYSICAL_XCC_ID); - tmp = tmp | (xcc_id << REG_FIELD_SHIFT(CP_PSP_XCP_CTL, XCC_DIE_ID)); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_PSP_XCP_CTL, tmp); break; default: break; From 4bc615a5237a641ddb2235236c21b1eaadb76963 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Sat, 12 Nov 2022 17:01:05 +0800 Subject: [PATCH 361/861] drm/amdgpu: fix vm context register assignment in mmhub v1.8 Assign the vm context register addr per aid instance. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 67338cb3d7bc..6f469b9aa9a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -515,19 +515,19 @@ static void mmhub_v1_8_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_aid; i++) { hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; - hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, + hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); - hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, + hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); hub->vm_inv_eng0_req = - SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ); + SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = - SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK); + SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK); hub->vm_context0_cntl = - SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL); - hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, 0, + SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i, regVM_L2_PROTECTION_FAULT_STATUS); - hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, 0, + hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL); hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; From cbd442ce91bdeb8f618511d65e674894a80e4c31 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Sat, 12 Nov 2022 16:30:40 +0530 Subject: [PATCH 362/861] drm/amdgpu: Skip runtime db read for PSP 13.0.6 Skip reading runtime db information for PSP 13.0.6. 
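The mmhub_v1_8 fix in patch 361 above boils down to indexing the
per-instance register base instead of hard-coding instance 0.
Schematically (hypothetical base addresses, not the real address map):

    /* SOC15_REG_OFFSET(MMHUB, i, regFOO) resolves through a
     * per-instance base table; passing instance 0 for every AID
     * made all hubs alias AID0's registers. */
    #include <stdio.h>

    static const unsigned int mmhub_base[4] = {
            0x10000, 0x20000, 0x30000, 0x40000 /* made-up per-AID bases */
    };

    int main(void)
    {
            unsigned int reg_vm_context0_cntl = 0x10; /* made-up offset */

            for (int i = 0; i < 4; i++)
                    printf("AID%d VM_CONTEXT0_CNTL at 0x%x\n",
                           i, mmhub_base[i] + reg_vm_context0_cntl);
            return 0;
    }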
Signed-off-by: Lijo Lazar
Reviewed-by: Hawking Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index dd7f6d688449..1319df5796cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -329,6 +329,9 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
 	bool ret = false;
 	int i;
 
+	if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6))
+		return false;
+
 	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
 	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
 
From 37dd9d58a595f4cda5a7f01703592cc4e00f69b4 Mon Sep 17 00:00:00 2001
From: Shiwu Zhang
Date: Fri, 11 Nov 2022 15:54:52 +0800
Subject: [PATCH 363/861] drm/amdgpu: fix kcq mqd_backup buffer double free for multi-XCD

For gfx_v9_4_3 and beyond, struct kiq has its own mqd_backup pointer
rather than using the last pointer from the mec struct. The kfree on the
pointer from the mec struct should therefore be removed; otherwise it
causes a double free of the first KCQ's mqd_backup buffer on XCD1.

Signed-off-by: Shiwu Zhang
Reviewed-by: Le Ma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c2e92664031b..8aea9e023275 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -480,7 +480,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
 
 	ring = &kiq->ring;
 	kfree(kiq->mqd_backup);
-	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
 	amdgpu_bo_free_kernel(&ring->mqd_obj,
 			      &ring->mqd_gpu_addr,
 			      &ring->mqd_ptr);

From 993d218f82211b1e17fcea7a5f727fa16efba353 Mon Sep 17 00:00:00 2001
From: Shiwu Zhang
Date: Mon, 14 Nov 2022 15:52:19 +0800
Subject: [PATCH 364/861] drm/amdgpu: remove partition attributes sysfs files for gfx_v9_4_3

For driver de-init operations such as rmmod, those partition-specific
attributes need to be removed accordingly.
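The fix follows the usual create/remove pairing for device attributes;
schematically (a condensed sketch, not the actual driver code):

    /* every attribute created at init must be removed at fini,
     * otherwise it lingers after driver de-init */
    static int toy_sysfs_init(struct device *dev)
    {
            return device_create_file(dev, &dev_attr_current_compute_partition);
    }

    static void toy_sysfs_fini(struct device *dev)
    {
            device_remove_file(dev, &dev_attr_current_compute_partition);
    }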
Signed-off-by: Shiwu Zhang Reviewed-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 1 + 3 files changed, 9 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 8aea9e023275..5ff49737d7c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1345,3 +1345,10 @@ int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) return 0; } + +void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_current_compute_partition); + device_remove_file(adev->dev, &dev_attr_available_compute_partition); + device_remove_file(adev->dev, &dev_attr_current_memory_partition); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index e37501685fa9..023c5b08929f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -502,4 +502,5 @@ int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); +void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 9d7852ffd708..75ad5176e99e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -895,6 +895,7 @@ static int gfx_v9_4_3_sw_fini(void *handle) gfx_v9_4_3_mec_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); gfx_v9_4_3_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); return 0; } From eaae4beee8a94b30f37341c9d14837c82e7e2647 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Mon, 14 Nov 2022 17:35:43 -0500 Subject: [PATCH 365/861] drm/amdgpu: more GPU page fault info for GC v9.4.3 Output IH cookie node_id and translate it to the corresponding AID id and XCC id, to help debug the GPU page fault. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f2814270da40..2966aca9545d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -557,7 +557,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, u64 addr; uint32_t cam_index = 0; int ret; - uint32_t node_id = 0; + uint32_t node_id; + + node_id = (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? entry->node_id : 0; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; @@ -570,8 +572,6 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; } else { hub_name = "gfxhub0"; - node_id = (adev->ip_versions[GC_HWIP][0] == - IP_VERSION(9, 4, 3)) ? entry->node_id : 0; hub = &adev->vmhub[node_id/2]; } @@ -634,6 +634,11 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, addr, entry->client_id, soc15_ih_clientid_name[entry->client_id]); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) + dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", + node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4, + node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? 
".XCD1" : ""); + if (amdgpu_sriov_vf(adev)) return 0; From 497db7ea33f7cec2a0019894e844789f003dbd22 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Tue, 8 Nov 2022 23:04:30 -0500 Subject: [PATCH 366/861] drm/amdgpu: Check APU supports true APP mode On GPXIP 9.4.3 APU, in no carveout mode there is no real vram heap and could be emulated by the driver over the interleaved NUMA system memory and the APU could also be in the carveout mode during early development stage or otherwise for debugging purpose so introduce a new member in amdgpu_gmc to figure out whether the APU is in the native mode as per the production configuration. AMD_IS_APU cannot be used for Accelerated Processing Platform APUs as it might be used in a different context on previous generations or on small APUs. Reviewed-by: Hawking Zhang Reviewed-by: Felix Kuehling Tested-by: Graham Sider Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 ++++++++++++++ 2 files changed, 15 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 572ef5be539f..e408abfc2daf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -251,6 +251,7 @@ struct amdgpu_gmc { uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER; bool tmz_enabled; + bool is_app_apu; const struct amdgpu_gmc_funcs *gmc_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2966aca9545d..0792c48fe347 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1442,6 +1442,20 @@ static int gmc_v9_0_early_init(void *handle) adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); } + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + enum amdgpu_pkg_type pkg_type = + adev->smuio.funcs->get_pkg_type(adev); + /* On GFXIP 9.4.3. APU, there is no physical VRAM domain present + * and the APU, can be in used two possible modes: + * - carveout mode + * - native APU mode + * "is_app_apu" can be used to identify the APU in the native + * mode. + */ + adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU && + !pci_resource_len(adev->pdev, 0)); + } + gmc_v9_0_set_gmc_funcs(adev); gmc_v9_0_set_irq_funcs(adev); gmc_v9_0_set_umc_funcs(adev); From 880f8b3f8032a022c85351857ba7020fd3345592 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 20 Jul 2022 13:45:30 +0530 Subject: [PATCH 367/861] drm/amdgpu: Rename xcc specific functions Add 'xcc' prefix to xcc specific functions to distinguish from IP block functions. 
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 205 +++++++++++++----------- 1 file changed, 113 insertions(+), 92 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 75ad5176e99e..93420c7db93c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -506,11 +506,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) return 0; } -static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, - u32 se_num, - u32 sh_num, - u32 instance, - int xcc_id) +static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -678,7 +675,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, - .select_se_sh = &gfx_v9_4_3_select_se_sh, + .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh, .read_wave_data = &gfx_v9_4_3_read_wave_data, .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, @@ -901,7 +898,8 @@ static int gfx_v9_4_3_sw_fini(void *handle) } #define DEFAULT_SH_MEM_BASES (0x6000) -static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev, int xcc_id) +static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev, + int xcc_id) { int i; uint32_t sh_mem_config; @@ -939,7 +937,7 @@ static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev, int xcc_id) } } -static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) +static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) { int vmid; @@ -1000,25 +998,26 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); for (i = 0; i < num_xcc; i++) { - gfx_v9_4_3_init_compute_vmid(adev, i); - gfx_v9_4_3_init_gds_vmid(adev, i); + gfx_v9_4_3_xcc_init_compute_vmid(adev, i); + gfx_v9_4_3_xcc_init_gds_vmid(adev, i); } } -static void gfx_v9_4_3_enable_save_restore_machine(struct amdgpu_device *adev, - int xcc_id) +static void +gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev, + int xcc_id) { WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1); } -static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev, int xcc_id) +static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id) { /* * Rlc save restore list is workable since v2_1. * And it's needed by gfxoff feature. 
*/ if (adev->gfx.rlc.is_rlc_v2_1) - gfx_v9_4_3_enable_save_restore_machine(adev, xcc_id); + gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id); if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | @@ -1031,7 +1030,7 @@ static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev, int xcc_id) } } -static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) +static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; @@ -1040,7 +1039,8 @@ static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data); } -static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) +static void gfx_v9_4_3_xcc_program_xcc_id(struct amdgpu_device *adev, + int xcc_id) { uint32_t tmp = 0; int num_xcc; @@ -1074,7 +1074,7 @@ static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) return true; } -static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev, int xcc_id) +static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; unsigned i; @@ -1091,7 +1091,8 @@ static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev, int xcc_id) } } -static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) +static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, + int xcc_id) { uint32_t data; @@ -1108,8 +1109,8 @@ static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) return 0; } -static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, - int xcc_id) +static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev, + int xcc_id) { u32 i, j, k; u32 mask; @@ -1117,16 +1118,17 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); + gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, + xcc_id); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SERDES_CU_MASTER_BUSY) == 0) break; udelay(1); } if (k == adev->usec_timeout) { - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff, - xcc_id); + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, + 0xffffffff, + 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -1134,7 +1136,8 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -1148,8 +1151,8 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } } -static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, - bool enable, int xcc_id) +static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev, + bool enable, int xcc_id) { u32 tmp; @@ -1171,8 +1174,8 @@ static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), RLC_CNTL, RLC_ENABLE_F32, 0); - gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); - gfx_v9_4_3_wait_for_rlc_serdes(adev, i); + 
gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, i); + gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, i); } } @@ -1203,7 +1206,7 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) /* carrizo do enable cp interrupt after cp inited */ if (!(adev->flags & AMD_IS_APU)) { - gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i); + gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, i); udelay(50); } @@ -1226,7 +1229,8 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) } } -static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev, int xcc_id) +static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev, + int xcc_id) { const struct rlc_firmware_header_v2_0 *hdr; const __le32 *fw_data; @@ -1267,11 +1271,11 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32_SOC15(GC, GET_INST(GC, i), regRLC_CGCG_CGLS_CTRL, 0); - gfx_v9_4_3_init_pg(adev, i); + gfx_v9_4_3_xcc_init_pg(adev, i); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { /* legacy rlc firmware loading */ - r = gfx_v9_4_3_rlc_load_microcode(adev, i); + r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, i); if (r) return r; } @@ -1341,8 +1345,8 @@ static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offs ARRAY_SIZE(rlcg_access_gc_9_4_3)); } -static void gfx_v9_4_3_cp_compute_enable(struct amdgpu_device *adev, - bool enable, int xcc_id) +static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev, + bool enable, int xcc_id) { if (enable) { WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0); @@ -1354,8 +1358,8 @@ static void gfx_v9_4_3_cp_compute_enable(struct amdgpu_device *adev, udelay(50); } -static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev, - int xcc_id) +static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev, + int xcc_id) { const struct gfx_firmware_header_v1_0 *mec_hdr; const __le32 *fw_data; @@ -1367,7 +1371,7 @@ static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev, if (!adev->gfx.mec_fw) return -EINVAL; - gfx_v9_4_3_cp_compute_enable(adev, false, xcc_id); + gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); @@ -1403,7 +1407,7 @@ static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev, } /* KIQ functions */ -static void gfx_v9_4_3_kiq_setting(struct amdgpu_ring *ring, int xcc_id) +static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id) { uint32_t tmp; struct amdgpu_device *adev = ring->adev; @@ -1558,7 +1562,8 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) return 0; } -static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring, int xcc_id) +static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring, + int xcc_id) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -1663,7 +1668,8 @@ static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring, int xcc_id) return 0; } -static int gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring, int xcc_id) +static int gfx_v9_4_3_xcc_kiq_fini_register(struct amdgpu_ring *ring, + int xcc_id) { struct amdgpu_device *adev = ring->adev; int j; @@ -1702,13 +1708,13 @@ static int gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring, int xcc_id) return 0; } -static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) +static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring 
*ring, int xcc_id) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; struct v9_mqd *tmp_mqd; - gfx_v9_4_3_kiq_setting(ring, xcc_id); + gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id); /* GPU could be in bad state during probe, driver trigger the reset * after load the SMU, in this case , the mqd is not be initialized. @@ -1726,7 +1732,7 @@ static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) amdgpu_ring_clear_ring(ring); mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); - gfx_v9_4_3_kiq_init_register(ring, xcc_id); + gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id); soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); } else { @@ -1736,7 +1742,7 @@ static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); gfx_v9_4_3_mqd_init(ring); - gfx_v9_4_3_kiq_init_register(ring, xcc_id); + gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id); soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); @@ -1747,7 +1753,7 @@ static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) return 0; } -static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) +static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -1785,7 +1791,7 @@ static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) return 0; } -static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev, int xcc_id) +static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring; int r; @@ -1802,7 +1808,7 @@ static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev, int xcc_id) return r; } - gfx_v9_4_3_kiq_init_queue(ring, xcc_id); + gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); @@ -1810,12 +1816,12 @@ static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev, int xcc_id) return 0; } -static int gfx_v9_4_3_kcq_resume(struct amdgpu_device *adev, int xcc_id) +static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring = NULL; int r = 0, i; - gfx_v9_4_3_cp_compute_enable(adev, true, xcc_id); + gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; @@ -1825,7 +1831,7 @@ static int gfx_v9_4_3_kcq_resume(struct amdgpu_device *adev, int xcc_id) goto done; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v9_4_3_kcq_init_queue(ring, xcc_id); + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -1846,12 +1852,12 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); + gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, i); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - gfx_v9_4_3_disable_gpa_mode(adev, i); + gfx_v9_4_3_xcc_disable_gpa_mode(adev, i); - r = gfx_v9_4_3_cp_compute_load_microcode(adev, i); + r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, i); if (r) return r; } @@ 
-1862,13 +1868,13 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) adev, amdgpu_user_partt_mode); /* set the virtual and physical id based on partition_mode */ - gfx_v9_4_3_program_xcc_id(adev, i); + gfx_v9_4_3_xcc_program_xcc_id(adev, i); - r = gfx_v9_4_3_kiq_resume(adev, i); + r = gfx_v9_4_3_xcc_kiq_resume(adev, i); if (r) return r; - r = gfx_v9_4_3_kcq_resume(adev, i); + r = gfx_v9_4_3_xcc_kcq_resume(adev, i); if (r) return r; @@ -1879,16 +1885,16 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) return r; } - gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i); + gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, i); } return 0; } -static void gfx_v9_4_3_cp_enable(struct amdgpu_device *adev, bool enable, - int xcc_id) +static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable, + int xcc_id) { - gfx_v9_4_3_cp_compute_enable(adev, enable, xcc_id); + gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id); } static int gfx_v9_4_3_hw_init(void *handle) @@ -1932,12 +1938,13 @@ static int gfx_v9_4_3_hw_fini(void *handle) soc15_grbm_select(adev, adev->gfx.kiq[i].ring.me, adev->gfx.kiq[i].ring.pipe, adev->gfx.kiq[i].ring.queue, 0, GET_INST(GC, i)); - gfx_v9_4_3_kiq_fini_register(&adev->gfx.kiq[i].ring, i); + gfx_v9_4_3_xcc_kiq_fini_register(&adev->gfx.kiq[i].ring, + i); soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, i)); mutex_unlock(&adev->srbm_mutex); } - gfx_v9_4_3_cp_enable(adev, false, i); + gfx_v9_4_3_xcc_cp_enable(adev, false, i); } /* Skip suspend with A+A reset */ @@ -2024,7 +2031,7 @@ static int gfx_v9_4_3_soft_reset(void *handle) adev->gfx.rlc.funcs->stop(adev); /* Disable MEC parsing/prefetching */ - gfx_v9_4_3_cp_compute_enable(adev, false, 0); + gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0); if (grbm_soft_reset) { tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); @@ -2111,8 +2118,9 @@ static int gfx_v9_4_3_late_init(void *handle) return 0; } -static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, - bool enable, int xcc_id) +static void +gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable, int xcc_id) { uint32_t data, def; @@ -2180,8 +2188,9 @@ static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *ad amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); } -static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *adev, - bool enable, int xcc_id) +static void +gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, + bool enable, int xcc_id) { uint32_t def, data; @@ -2232,31 +2241,35 @@ static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *ad amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); } -static int gfx_v9_4_3_update_gfx_clock_gating(struct amdgpu_device *adev, - bool enable, int xcc_id) +static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev, + bool enable, int xcc_id) { if (enable) { /* CGCG/CGLS should be enabled after MGCG/MGLS * === MGCG + MGLS === */ - gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable, xcc_id); + gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, + xcc_id); /* === CGCG + CGLS === */ - gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable, xcc_id); + gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, + xcc_id); } else { /* CGCG/CGLS should be disabled before MGCG/MGLS * === CGCG + CGLS === */ - gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable, xcc_id); + 
gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, + xcc_id); /* === MGCG + MGLS === */ - gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable, xcc_id); + gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, + xcc_id); } return 0; } static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled, - .set_safe_mode = gfx_v9_4_3_set_safe_mode, - .unset_safe_mode = gfx_v9_4_3_unset_safe_mode, + .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode, + .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode, .init = gfx_v9_4_3_rlc_init, .resume = gfx_v9_4_3_rlc_resume, .stop = gfx_v9_4_3_rlc_stop, @@ -2285,8 +2298,8 @@ static int gfx_v9_4_3_set_clockgating_state(void *handle, switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 3): for (i = 0; i < num_xcc; i++) - gfx_v9_4_3_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE, i); + gfx_v9_4_3_xcc_update_gfx_clock_gating( + adev, state == AMD_CG_STATE_GATE, i); break; default: break; @@ -2553,10 +2566,9 @@ static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask); } -static void gfx_v9_4_3_set_compute_eop_interrupt_state(struct amdgpu_device *adev, - int me, int pipe, - enum amdgpu_interrupt_state state, - int xcc_id) +static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + struct amdgpu_device *adev, int me, int pipe, + enum amdgpu_interrupt_state state, int xcc_id) { u32 mec_int_cntl, mec_int_cntl_reg; @@ -2664,28 +2676,36 @@ static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, for (i = 0; i < num_xcc; i++) { switch (type) { case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state, i); + gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + adev, 1, 0, state, i); break; case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 1, state, i); + gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + adev, 1, 1, state, i); break; case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 2, state, i); + gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + adev, 1, 2, state, i); break; case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 3, state, i); + gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + adev, 1, 3, state, i); break; case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 0, state, i); + gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + adev, 2, 0, state, i); break; case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 1, state, i); + gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + adev, 2, 1, state, i); break; case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 2, state, i); + gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + adev, 2, 2, state, i); break; case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: - gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 3, state, i); + gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( + adev, 2, 3, state, i); break; default: break; @@ -3090,7 +3110,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, 0); + gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0); gfx_v9_4_3_set_user_cu_inactive_bitmap( adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); bitmap = 
gfx_v9_4_3_get_cu_active_bitmap(adev); @@ -3123,7 +3143,8 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; From 44b5cf2e0f7952856f48b9be56b9eb2f688d70f0 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 16 Nov 2022 10:47:18 +0530 Subject: [PATCH 368/861] drm/amdgpu: Add xcc specific functions Add more XCC specific functions and use them from IP block functions. RLC, CP functions are further split to have xcc specific versions. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 307 ++++++++++++++---------- 1 file changed, 182 insertions(+), 125 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 93420c7db93c..93a0baa4515c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -955,52 +955,64 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) } } -static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) +static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev, + int xcc_id) { u32 tmp; - int i, j, num_xcc; - - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - - gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info); - adev->gfx.config.db_debug2 = RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2); + int i; /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { - for (j = 0; j < num_xcc; j++) { - soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, j)); - /* CP and shaders */ - if (i == 0) { - tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, - SH_MEM_ALIGNMENT_MODE_UNALIGNED); - tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, - !!adev->gmc.noretry); - WREG32_SOC15_RLC(GC, GET_INST(GC, j), regSH_MEM_CONFIG, tmp); - WREG32_SOC15_RLC(GC, GET_INST(GC, j), regSH_MEM_BASES, 0); - } else { - tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, - SH_MEM_ALIGNMENT_MODE_UNALIGNED); - tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, - !!adev->gmc.noretry); - WREG32_SOC15_RLC(GC, GET_INST(GC, j), regSH_MEM_CONFIG, tmp); - tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, - (adev->gmc.private_aperture_start >> 48)); - tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, - (adev->gmc.shared_aperture_start >> 48)); - WREG32_SOC15_RLC(GC, GET_INST(GC, j), regSH_MEM_BASES, tmp); - } + soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id)); + /* CP and shaders */ + if (i == 0) { + tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, + SH_MEM_ALIGNMENT_MODE_UNALIGNED); + tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, + !!adev->gmc.noretry); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), + regSH_MEM_CONFIG, tmp); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), + regSH_MEM_BASES, 0); + } else { + tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, + SH_MEM_ALIGNMENT_MODE_UNALIGNED); + tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, + !!adev->gmc.noretry); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), + regSH_MEM_CONFIG, tmp); + tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, + (adev->gmc.private_aperture_start >> + 48)); + tmp = REG_SET_FIELD(tmp, 
SH_MEM_BASES, SHARED_BASE, + (adev->gmc.shared_aperture_start >> + 48)); + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), + regSH_MEM_BASES, tmp); } } soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0)); mutex_unlock(&adev->srbm_mutex); - for (i = 0; i < num_xcc; i++) { - gfx_v9_4_3_xcc_init_compute_vmid(adev, i); - gfx_v9_4_3_xcc_init_gds_vmid(adev, i); - } + gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id); + gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id); +} + +static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) +{ + int i, num_xcc; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + + gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info); + adev->gfx.config.db_debug2 = + RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2); + + for (i = 0; i < num_xcc; i++) + gfx_v9_4_3_xcc_constants_init(adev, i); } static void @@ -1167,16 +1179,31 @@ static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev, WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp); } +static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id) +{ + WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, + RLC_ENABLE_F32, 0); + gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id); + gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id); +} + static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) { int i, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { - WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), RLC_CNTL, RLC_ENABLE_F32, 0); - gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, i); - gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, i); - } + for (i = 0; i < num_xcc; i++) + gfx_v9_4_3_xcc_rlc_stop(adev, i); +} + +static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id) +{ + WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET, + SOFT_RESET_RLC, 1); + udelay(50); + WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET, + SOFT_RESET_RLC, 0); + udelay(50); } static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) @@ -1184,10 +1211,19 @@ static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) int i, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { - WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - udelay(50); - WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); + for (i = 0; i < num_xcc; i++) + gfx_v9_4_3_xcc_rlc_reset(adev, i); +} + +static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id) +{ + WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, + RLC_ENABLE_F32, 1); + udelay(50); + + /* carrizo do enable cp interrupt after cp inited */ + if (!(adev->flags & AMD_IS_APU)) { + gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id); udelay(50); } } @@ -1201,15 +1237,7 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), RLC_CNTL, RLC_ENABLE_F32, 1); - udelay(50); - - /* carrizo do enable cp interrupt after cp inited */ - if (!(adev->flags & AMD_IS_APU)) { - gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, i); - udelay(50); - } - + gfx_v9_4_3_xcc_rlc_start(adev, i); #ifdef AMDGPU_RLC_DEBUG_RETRY /* RLC_GPM_GENERAL_6 : RLC Ucode version */ rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6); @@ -1260,29 +1288,40 @@ static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev, return 0; } +static int 
gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id) +{ + int r; + + gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id); + + /* disable CG */ + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0); + + gfx_v9_4_3_xcc_init_pg(adev, xcc_id); + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + /* legacy rlc firmware loading */ + r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id); + if (r) + return r; + } + + gfx_v9_4_3_xcc_rlc_start(adev, xcc_id); + + return 0; +} + static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) { int r, i, num_xcc; - adev->gfx.rlc.funcs->stop(adev); - num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - /* disable CG */ - WREG32_SOC15(GC, GET_INST(GC, i), regRLC_CGCG_CGLS_CTRL, 0); - - gfx_v9_4_3_xcc_init_pg(adev, i); - - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - /* legacy rlc firmware loading */ - r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, i); - if (r) - return r; - } + r = gfx_v9_4_3_xcc_rlc_resume(adev, i); + if (r) + return r; } - adev->gfx.rlc.funcs->start(adev); - return 0; } @@ -1845,47 +1884,58 @@ done: return r; } +static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) +{ + struct amdgpu_ring *ring; + int r, j; + + gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id); + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id); + + r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id); + if (r) + return r; + } + + if (adev->gfx.partition_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + gfx_v9_4_3_switch_compute_partition(adev, + amdgpu_user_partt_mode); + + /* set the virtual and physical id based on partition_mode */ + gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id); + + r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id); + if (r) + return r; + + r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id); + if (r) + return r; + + for (j = 0; j < adev->gfx.num_compute_rings; j++) { + ring = &adev->gfx.compute_ring + [j + xcc_id * adev->gfx.num_compute_rings]; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } + + gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id); + + return 0; +} + static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { - int r, i, j, num_xcc; - struct amdgpu_ring *ring; + int r, i, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, i); - - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - gfx_v9_4_3_xcc_disable_gpa_mode(adev, i); - - r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, i); - if (r) - return r; - } - - if (adev->gfx.partition_mode == - AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) - gfx_v9_4_3_switch_compute_partition( - adev, amdgpu_user_partt_mode); - - /* set the virtual and physical id based on partition_mode */ - gfx_v9_4_3_xcc_program_xcc_id(adev, i); - - r = gfx_v9_4_3_xcc_kiq_resume(adev, i); + r = gfx_v9_4_3_xcc_cp_resume(adev, i); if (r) return r; - - r = gfx_v9_4_3_xcc_kcq_resume(adev, i); - if (r) - return r; - - for (j = 0; j < adev->gfx.num_compute_rings; j++) { - ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings]; - r = amdgpu_ring_test_helper(ring); - if (r) - return r; - } - - gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, i); } return 0; @@ -1897,6 +1947,37 @@ static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable, gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id); } +static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int 
xcc_id) +{ + if (amdgpu_gfx_disable_kcq(adev, xcc_id)) + DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id); + + /* Use deinitialize sequence from CAIL when unbinding device + * from driver, otherwise KIQ is hanging when binding back + */ + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me, + adev->gfx.kiq[xcc_id].ring.pipe, + adev->gfx.kiq[xcc_id].ring.queue, 0, + GET_INST(GC, xcc_id)); + gfx_v9_4_3_xcc_kiq_fini_register(&adev->gfx.kiq[xcc_id].ring, + xcc_id); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); + mutex_unlock(&adev->srbm_mutex); + } + + gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id); + + /* Skip suspend with A+A reset */ + if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { + dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n"); + return; + } + + gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id); +} + static int gfx_v9_4_3_hw_init(void *handle) { int r; @@ -1927,33 +2008,9 @@ static int gfx_v9_4_3_hw_fini(void *handle) num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { - if (amdgpu_gfx_disable_kcq(adev, i)) - DRM_ERROR("XCD %d KCQ disable failed\n", i); - - /* Use deinitialize sequence from CAIL when unbinding device - * from driver, otherwise KIQ is hanging when binding back - */ - if (!amdgpu_in_reset(adev) && !adev->in_suspend) { - mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, adev->gfx.kiq[i].ring.me, - adev->gfx.kiq[i].ring.pipe, - adev->gfx.kiq[i].ring.queue, 0, GET_INST(GC, i)); - gfx_v9_4_3_xcc_kiq_fini_register(&adev->gfx.kiq[i].ring, - i); - soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, i)); - mutex_unlock(&adev->srbm_mutex); - } - - gfx_v9_4_3_xcc_cp_enable(adev, false, i); + gfx_v9_4_3_xcc_fini(adev, i); } - /* Skip suspend with A+A reset */ - if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { - dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n"); - return 0; - } - - adev->gfx.rlc.funcs->stop(adev); return 0; } From dd1a02e280dae1904c8858c8cb572a61a84ba7c0 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 14 Sep 2022 12:16:48 +0530 Subject: [PATCH 369/861] drm/amdgpu: Add xcc specific functions for gfxhub GFXHUB 1.2 supports multiple XCC instances. Add XCC specific functions to handle XCC instances separately. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 + drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 200 ++++++++++++++--------- 2 files changed, 127 insertions(+), 77 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 4c4ce33c8359..c2dd100bbd60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1263,6 +1263,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)); +#define for_each_inst(i, inst_mask) \ + for (i = ffs(inst_mask) - 1; inst_mask; \ + inst_mask &= ~(1U << i), i = ffs(inst_mask) - 1) + #define MIN(X, Y) ((X) < (Y) ? 
(X) : (Y)) /* Common functions */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index c26ac0662c7e..b9316bfb7263 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -38,15 +38,15 @@ static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev) return (u64)RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_FB_OFFSET) << 24; } -static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, - uint32_t vmid, - uint64_t page_table_base) +static void gfxhub_v1_2_xcc_setup_vm_pt_regs(struct amdgpu_device *adev, + uint32_t vmid, + uint64_t page_table_base, + uint32_t xcc_mask) { struct amdgpu_vmhub *hub; - int i, num_xcc; + int i; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; WREG32_SOC15_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -57,27 +57,36 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); - } } -static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) +static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, + uint32_t vmid, + uint64_t page_table_base) +{ + uint32_t xcc_mask; + + xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0); + gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, vmid, page_table_base, xcc_mask); +} + +static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev, + uint32_t xcc_mask) { uint64_t pt_base; - int i, num_xcc; + int i; if (adev->gmc.pdb0_bo) pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); else pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - gfxhub_v1_2_setup_vm_pt_regs(adev, 0, pt_base); + gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, 0, pt_base, xcc_mask); /* If use GART for FB translation, vmid0 page table covers both * vram and system memory (gart) */ - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { if (adev->gmc.pdb0_bo) { WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -110,14 +119,15 @@ static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) } } -static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) +static void +gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev, + uint32_t xcc_mask) { uint64_t value; uint32_t tmp; - int i, num_xcc; + int i; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { /* Program the AGP BAR */ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BASE, 0); WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); @@ -178,13 +188,13 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) } } -static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) +static void gfxhub_v1_2_xcc_init_tlb_regs(struct amdgpu_device *adev, + uint32_t xcc_mask) { uint32_t tmp; - int i, num_xcc; + int i; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { /* Setup TLB control */ tmp = RREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL); @@ -204,13 +214,13 @@ static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) } } -static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) +static void gfxhub_v1_2_xcc_init_cache_regs(struct 
amdgpu_device *adev, + uint32_t xcc_mask) { uint32_t tmp; - int i, num_xcc; + int i; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { /* Setup L2 cache */ tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -252,13 +262,13 @@ static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) } } -static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) +static void gfxhub_v1_2_xcc_enable_system_domain(struct amdgpu_device *adev, + uint32_t xcc_mask) { uint32_t tmp; - int i, num_xcc; + int i; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, @@ -271,12 +281,13 @@ static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) } } -static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) +static void +gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev, + uint32_t xcc_mask) { - int i, num_xcc; + int i; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0XFFFFFFFF); @@ -298,12 +309,13 @@ static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) } } -static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) +static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev, + uint32_t xcc_mask) { struct amdgpu_vmhub *hub; unsigned num_level, block_size; uint32_t tmp; - int i, j, num_xcc; + int i, j; num_level = adev->vm_manager.num_level; block_size = adev->vm_manager.block_size; @@ -312,8 +324,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) else block_size -= 9; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (j = 0; j < num_xcc; j++) { + for_each_inst(j, xcc_mask) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, i); @@ -368,13 +379,13 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) } } -static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) +static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev, + uint32_t xcc_mask) { struct amdgpu_vmhub *hub; - unsigned i, j, num_xcc; + unsigned int i, j; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (j = 0; j < num_xcc; j++) { + for_each_inst(j, xcc_mask) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0 ; i < 18; ++i) { @@ -386,18 +397,21 @@ static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) } } -static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) +static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev, + uint32_t xcc_mask) { - int i, num_xcc; + uint32_t tmp_mask; + int i; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { - if (amdgpu_sriov_vf(adev)) { - /* - * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ + tmp_mask = xcc_mask; + /* + * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are + * VF copy registers so vbios post doesn't program them, for + * SRIOV driver need to program them + */ 
+ if (amdgpu_sriov_vf(adev)) { + for_each_inst(i, tmp_mask) { + i = ffs(tmp_mask) - 1; WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, adev->gmc.vram_start >> 24); WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, @@ -406,29 +420,37 @@ static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) } /* GART Enable. */ - gfxhub_v1_2_init_gart_aperture_regs(adev); - gfxhub_v1_2_init_system_aperture_regs(adev); - gfxhub_v1_2_init_tlb_regs(adev); + gfxhub_v1_2_xcc_init_gart_aperture_regs(adev, xcc_mask); + gfxhub_v1_2_xcc_init_system_aperture_regs(adev, xcc_mask); + gfxhub_v1_2_xcc_init_tlb_regs(adev, xcc_mask); if (!amdgpu_sriov_vf(adev)) - gfxhub_v1_2_init_cache_regs(adev); + gfxhub_v1_2_xcc_init_cache_regs(adev, xcc_mask); - gfxhub_v1_2_enable_system_domain(adev); + gfxhub_v1_2_xcc_enable_system_domain(adev, xcc_mask); if (!amdgpu_sriov_vf(adev)) - gfxhub_v1_2_disable_identity_aperture(adev); - gfxhub_v1_2_setup_vmid_config(adev); - gfxhub_v1_2_program_invalidation(adev); + gfxhub_v1_2_xcc_disable_identity_aperture(adev, xcc_mask); + gfxhub_v1_2_xcc_setup_vmid_config(adev, xcc_mask); + gfxhub_v1_2_xcc_program_invalidation(adev, xcc_mask); return 0; } -static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) +static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) +{ + uint32_t xcc_mask; + + xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0); + return gfxhub_v1_2_xcc_gart_enable(adev, xcc_mask); +} + +static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev, + uint32_t xcc_mask) { struct amdgpu_vmhub *hub; u32 tmp; - u32 i, j, num_xcc; + u32 i, j; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (j = 0; j < num_xcc; j++) { + for_each_inst(j, xcc_mask) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; /* Disable all tables */ for (i = 0; i < 16; i++) @@ -452,20 +474,22 @@ static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) } } -/** - * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling - * - * @adev: amdgpu_device pointer - * @value: true redirects VM faults to the default page - */ -static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, - bool value) +static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) +{ + uint32_t xcc_mask; + + xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0); + gfxhub_v1_2_xcc_gart_disable(adev, xcc_mask); +} + +static void gfxhub_v1_2_xcc_set_fault_enable_default(struct amdgpu_device *adev, + bool value, + uint32_t xcc_mask) { u32 tmp; - int i, num_xcc; + int i; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -501,13 +525,27 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, } } -static void gfxhub_v1_2_init(struct amdgpu_device *adev) +/** + * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, + bool value) +{ + uint32_t xcc_mask; + + xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0); + gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, xcc_mask); +} + +static void gfxhub_v1_2_xcc_init(struct amdgpu_device *adev, uint32_t xcc_mask) { struct amdgpu_vmhub *hub; - int i, num_xcc; + int i; - 
num_xcc = NUM_XCC(adev->gfx.xcc_mask); - for (i = 0; i < num_xcc; i++) { + for_each_inst(i, xcc_mask) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; hub->ctx0_ptb_addr_lo32 = @@ -543,6 +581,14 @@ static void gfxhub_v1_2_init(struct amdgpu_device *adev) } } +static void gfxhub_v1_2_init(struct amdgpu_device *adev) +{ + uint32_t xcc_mask; + + xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0); + gfxhub_v1_2_xcc_init(adev, xcc_mask); +} + static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev) { u32 max_num_physical_nodes; From 527c670e5323414dbef8f4719dc9b348a50ac1c8 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 14 Sep 2022 12:48:08 +0530 Subject: [PATCH 370/861] drm/amdgpu: Add sdma instance specific functions SDMA 4.4.2 supports multiple instances. Add functions to support handling of each SDMA instance separately. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 162 +++++++++++++---------- 1 file changed, 91 insertions(+), 71 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 016813b295ac..6a971e15b4b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -94,7 +94,8 @@ static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id) } } -static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev) +static void sdma_v4_4_2_inst_init_golden_registers(struct amdgpu_device *adev, + uint32_t inst_mask) { u32 val; int i; @@ -418,13 +419,14 @@ static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 * * Stop the gfx async dma ring buffers. */ -static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev) +static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, + uint32_t inst_mask) { struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 rb_cntl, ib_cntl; int i, unset = 0; - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { sdma[i] = &adev->sdma.instance[i].ring; if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) { @@ -448,7 +450,8 @@ static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev) * * Stop the compute async dma queues. */ -static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev) +static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev, + uint32_t inst_mask) { /* XXX todo */ } @@ -460,14 +463,15 @@ static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev) * * Stop the page async dma ring buffers. */ -static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev) +static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev, + uint32_t inst_mask) { struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 rb_cntl, ib_cntl; int i; bool unset = false; - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { sdma[i] = &adev->sdma.instance[i].page; if ((adev->mman.buffer_funcs_ring == sdma[i]) && @@ -495,7 +499,8 @@ static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev) * * Halt or unhalt the async dma engines context switch. 
*/ -static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +static void sdma_v4_4_2_inst_ctx_switch_enable(struct amdgpu_device *adev, + bool enable, uint32_t inst_mask) { u32 f32_cntl, phase_quantum = 0; int i; @@ -524,7 +529,7 @@ static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enabl unit << SDMA_PHASE0_QUANTUM__UNIT__SHIFT; } - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { f32_cntl = RREG32_SDMA(i, regSDMA_CNTL); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL, AUTO_CTXSW_ENABLE, enable ? 1 : 0); @@ -538,7 +543,6 @@ static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enabl /* Extend page fault timeout to avoid interrupt storm */ WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080); } - } /** @@ -546,22 +550,24 @@ static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enabl * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs. + * @inst_mask: mask of dma engine instances to be enabled * * Halt or unhalt the async dma engines. */ -static void sdma_v4_4_2_enable(struct amdgpu_device *adev, bool enable) +static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable, + uint32_t inst_mask) { u32 f32_cntl; int i; if (!enable) { - sdma_v4_4_2_gfx_stop(adev); - sdma_v4_4_2_rlc_stop(adev); + sdma_v4_4_2_inst_gfx_stop(adev, inst_mask); + sdma_v4_4_2_inst_rlc_stop(adev, inst_mask); if (adev->sdma.has_page_queue) - sdma_v4_4_2_page_stop(adev); + sdma_v4_4_2_inst_page_stop(adev, inst_mask); } - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1); WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl); @@ -780,7 +786,8 @@ static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev) * Set up the compute DMA queues and enable them. * Returns 0 for success, error for failure. */ -static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev) +static int sdma_v4_4_2_inst_rlc_resume(struct amdgpu_device *adev, + uint32_t inst_mask) { sdma_v4_4_2_init_pg(adev); @@ -795,7 +802,8 @@ static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev) * Loads the sDMA0/1 ucode. * Returns 0 for success, -EINVAL if the ucode is not available. */ -static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev) +static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev, + uint32_t inst_mask) { const struct sdma_firmware_header_v1_0 *hdr; const __le32 *fw_data; @@ -803,9 +811,9 @@ static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev) int i, j; /* halt the MEs */ - sdma_v4_4_2_enable(adev, false); + sdma_v4_4_2_inst_enable(adev, false, inst_mask); - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { if (!adev->sdma.instance[i].fw) return -EINVAL; @@ -831,38 +839,41 @@ static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev) } /** - * sdma_v4_4_2_start - setup and start the async dma engines + * sdma_v4_4_2_inst_start - setup and start the async dma engines * * @adev: amdgpu_device pointer * * Set up the DMA engines and enable them. * Returns 0 for success, error for failure. 
*/ -static int sdma_v4_4_2_start(struct amdgpu_device *adev) +static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev, + uint32_t inst_mask) { struct amdgpu_ring *ring; + uint32_t tmp_mask; int i, r = 0; if (amdgpu_sriov_vf(adev)) { - sdma_v4_4_2_ctx_switch_enable(adev, false); - sdma_v4_4_2_enable(adev, false); + sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask); + sdma_v4_4_2_inst_enable(adev, false, inst_mask); } else { /* bypass sdma microcode loading on Gopher */ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP && - !(adev->pdev->device == 0x49) && !(adev->pdev->device == 0x50)) { - r = sdma_v4_4_2_load_microcode(adev); + adev->sdma.instance[0].fw) { + r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask); if (r) return r; } /* unhalt the MEs */ - sdma_v4_4_2_enable(adev, true); + sdma_v4_4_2_inst_enable(adev, true, inst_mask); /* enable sdma ring preemption */ - sdma_v4_4_2_ctx_switch_enable(adev, true); + sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask); } /* start the gfx rings and rlc compute queues */ - for (i = 0; i < adev->sdma.num_instances; i++) { + tmp_mask = inst_mask; + for_each_inst(i, tmp_mask) { uint32_t temp; WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); @@ -889,15 +900,16 @@ static int sdma_v4_4_2_start(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev)) { - sdma_v4_4_2_ctx_switch_enable(adev, true); - sdma_v4_4_2_enable(adev, true); + sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask); + sdma_v4_4_2_inst_enable(adev, true, inst_mask); } else { - r = sdma_v4_4_2_rlc_resume(adev); + r = sdma_v4_4_2_inst_rlc_resume(adev, inst_mask); if (r) return r; } - for (i = 0; i < adev->sdma.num_instances; i++) { + tmp_mask = inst_mask; + for_each_inst(i, tmp_mask) { ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_helper(ring); @@ -1383,14 +1395,17 @@ static int sdma_v4_4_2_hw_init(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t inst_mask; + /* TODO: Check if this is needed */ if (adev->flags & AMD_IS_APU) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); if (!amdgpu_sriov_vf(adev)) - sdma_v4_4_2_init_golden_registers(adev); + sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask); - r = sdma_v4_4_2_start(adev); + r = sdma_v4_4_2_inst_start(adev, inst_mask); return r; } @@ -1398,22 +1413,27 @@ static int sdma_v4_4_2_hw_init(void *handle) static int sdma_v4_4_2_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t inst_mask; int i; if (amdgpu_sriov_vf(adev)) return 0; + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0 + i); } - sdma_v4_4_2_ctx_switch_enable(adev, false); - sdma_v4_4_2_enable(adev, false); + sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask); + sdma_v4_4_2_inst_enable(adev, false, inst_mask); return 0; } +static int sdma_v4_4_2_set_clockgating_state(void *handle, + enum amd_clockgating_state state); + static int sdma_v4_4_2_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1650,15 +1670,39 @@ static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev, return 0; } -static void sdma_v4_4_2_update_medium_grain_clock_gating( - struct amdgpu_device *adev, - bool enable) +static void sdma_v4_4_2_inst_update_medium_grain_light_sleep( + struct amdgpu_device *adev, bool enable, 
uint32_t inst_mask) +{ + uint32_t data, def; + int i; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { + for_each_inst(i, inst_mask) { + /* 1-not override: enable sdma mem light sleep */ + def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL); + data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + if (def != data) + WREG32_SDMA(i, regSDMA_POWER_CNTL, data); + } + } else { + for_each_inst(i, inst_mask) { + /* 0-override:disable sdma mem light sleep */ + def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL); + data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + if (def != data) + WREG32_SDMA(i, regSDMA_POWER_CNTL, data); + } + } +} + +static void sdma_v4_4_2_inst_update_medium_grain_clock_gating( + struct amdgpu_device *adev, bool enable, uint32_t inst_mask) { uint32_t data, def; int i; if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL); data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK | SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK | @@ -1672,7 +1716,7 @@ static void sdma_v4_4_2_update_medium_grain_clock_gating( WREG32_SDMA(i, regSDMA_CLK_CTRL, data); } } else { - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL); data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK | SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK | @@ -1688,45 +1732,21 @@ static void sdma_v4_4_2_update_medium_grain_clock_gating( } } - -static void sdma_v4_4_2_update_medium_grain_light_sleep( - struct amdgpu_device *adev, - bool enable) -{ - uint32_t data, def; - int i; - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { - for (i = 0; i < adev->sdma.num_instances; i++) { - /* 1-not override: enable sdma mem light sleep */ - def = data = RREG32_SDMA(0, regSDMA_POWER_CNTL); - data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; - if (def != data) - WREG32_SDMA(0, regSDMA_POWER_CNTL, data); - } - } else { - for (i = 0; i < adev->sdma.num_instances; i++) { - /* 0-override:disable sdma mem light sleep */ - def = data = RREG32_SDMA(0, regSDMA_POWER_CNTL); - data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; - if (def != data) - WREG32_SDMA(0, regSDMA_POWER_CNTL, data); - } - } -} - static int sdma_v4_4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t inst_mask; if (amdgpu_sriov_vf(adev)) return 0; - sdma_v4_4_2_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE); - sdma_v4_4_2_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE); + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); + + sdma_v4_4_2_inst_update_medium_grain_clock_gating( + adev, state == AMD_CG_STATE_GATE, inst_mask); + sdma_v4_4_2_inst_update_medium_grain_light_sleep( + adev, state == AMD_CG_STATE_GATE, inst_mask); return 0; } From 75d1692393cb78b510ff18733457f91d002452f7 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 16 Sep 2022 12:43:35 +0530 Subject: [PATCH 371/861] drm/amdgpu: Add initial version of XCP routines Within a device, an accelerator core partition can be constituted from different IP instances. These partitions are spatial in nature. The number of partitions that can exist at the same time depends on the 'partition mode'. Add a manager entity which is responsible for switching between different partition modes and maintaining partitions. It is also responsible for suspend/resume of different partitions. 
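To illustrate the intended call flow, here is a minimal caller-side sketch (illustrative only, not part of this change; "example_xcp_funcs" stands in for the SOC-specific callback table that a later patch in this series provides):

	int r, id_mask;

	/* Register the manager; the partition mode is still unknown
	 * here, so no per-partition IP tables are built yet.
	 */
	r = amdgpu_xcp_mgr_init(adev, AMDGPU_XCP_MODE_NONE, 0,
				&example_xcp_funcs);
	if (r)
		return r;

	/* Reconfigure to CPX; on success this rebuilds the
	 * per-partition IP details via the get_ip_details() callback.
	 */
	r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
					     AMDGPU_CPX_PARTITION_MODE);
	if (r)
		return r;

	/* Bitmask of partition ids that own GFX instance 0, or -ENXIO */
	id_mask = amdgpu_xcp_get_partition(adev->xcp_mgr, AMDGPU_XCP_GFX, 0);
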
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 244 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 107 ++++++++++ 6 files changed, 356 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 8418a90df493..74a9aa6fe18c 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -60,7 +60,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \ amdgpu_fw_attestation.o amdgpu_securedisplay.o \ amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \ - amdgpu_ring_mux.o + amdgpu_ring_mux.o amdgpu_xcp.o amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c2dd100bbd60..3858d29baef1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -283,6 +283,7 @@ extern uint amdgpu_user_partt_mode; #define AMDGPU_SMARTSHIFT_MAX_BIAS (100) #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100) +struct amdgpu_xcp_mgr; struct amdgpu_device; struct amdgpu_irq_src; struct amdgpu_fpriv; @@ -765,6 +766,7 @@ struct amdgpu_device { struct amdgpu_acp acp; #endif struct amdgpu_hive_info *hive; + struct amdgpu_xcp_mgr *xcp_mgr; /* ASIC */ enum amd_asic_type asic_type; uint32_t family; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 023c5b08929f..5adfe4277641 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -61,7 +61,7 @@ enum amdgpu_gfx_partition { AMDGPU_TPX_PARTITION_MODE = 2, AMDGPU_QPX_PARTITION_MODE = 3, AMDGPU_CPX_PARTITION_MODE = 4, - AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1, }; #define NUM_XCC(x) hweight16(x) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 639b86c4d664..eb25ac98903f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -96,7 +96,7 @@ struct amdgpu_nbio_funcs { void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev); void (*clear_doorbell_interrupt)(struct amdgpu_device *adev); u32 (*get_rom_offset)(struct amdgpu_device *adev); - u32 (*get_compute_partition_mode)(struct amdgpu_device *adev); + int (*get_compute_partition_mode)(struct amdgpu_device *adev); u32 (*get_memory_partition_mode)(struct amdgpu_device *adev); void (*set_compute_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c new file mode 100644 index 000000000000..f59bc450cabe --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -0,0 +1,244 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_xcp.h" + +static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr, + struct amdgpu_xcp_ip *xcp_ip, int xcp_state) +{ + int (*run_func)(void *handle, uint32_t inst_mask); + int ret = 0; + + if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs) + return 0; + + run_func = NULL; + + switch (xcp_state) { + case AMDGPU_XCP_PREPARE_SUSPEND: + run_func = xcp_ip->ip_funcs->prepare_suspend; + break; + case AMDGPU_XCP_SUSPEND: + run_func = xcp_ip->ip_funcs->suspend; + break; + case AMDGPU_XCP_PREPARE_RESUME: + run_func = xcp_ip->ip_funcs->prepare_resume; + break; + case AMDGPU_XCP_RESUME: + run_func = xcp_ip->ip_funcs->resume; + break; + } + + if (run_func) + ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask); + + return ret; +} + +static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + int state) +{ + struct amdgpu_xcp_ip *xcp_ip; + struct amdgpu_xcp *xcp; + int i, ret; + + if (xcp_id > MAX_XCP || !xcp_mgr->xcp[xcp_id].valid) + return -EINVAL; + + xcp = &xcp_mgr->xcp[xcp_id]; + for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) { + xcp_ip = &xcp->ip[i]; + ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state); + if (ret) + break; + } + + return ret; +} + +int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id) +{ + return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, + AMDGPU_XCP_PREPARE_SUSPEND); +} + +int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id) +{ + return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND); +} + +int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id) +{ + return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, + AMDGPU_XCP_PREPARE_RESUME); +} + +int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id) +{ + return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME); +} + +static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + struct amdgpu_xcp_ip *ip) +{ + struct amdgpu_xcp *xcp; + + if (!ip) + return; + + xcp = &xcp_mgr->xcp[xcp_id]; + xcp->ip[ip->ip_id] = *ip; + xcp->ip[ip->ip_id].valid = true; + + xcp->valid = true; +} + +static int __amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps) +{ + struct amdgpu_xcp_ip ip; + int i, j, ret; + + for (i = 0; i < MAX_XCP; ++i) + xcp_mgr->xcp[i].valid = false; + + for (i = 0; i < num_xcps; ++i) { + for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) { + ret = 
xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j, + &ip); + if (ret) + continue; + + __amdgpu_xcp_add_block(xcp_mgr, i, &ip); + } + } + + xcp_mgr->num_xcps = num_xcps; + + return 0; +} + +int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +{ + int ret, num_xcps = 0; + + if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE) + return -EINVAL; + + if (xcp_mgr->mode == mode) + return 0; + + if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode) + return 0; + + mutex_lock(&xcp_mgr->xcp_lock); + + ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps); + + if (ret) + goto out; + + if (!num_xcps || num_xcps > MAX_XCP) { + ret = -EINVAL; + goto out; + } + + xcp_mgr->mode = mode; + __amdgpu_xcp_init(xcp_mgr, num_xcps); +out: + mutex_unlock(&xcp_mgr->xcp_lock); + + return ret; +} + +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + int mode; + + if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return xcp_mgr->mode; + + if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) + return xcp_mgr->mode; + + mutex_lock(&xcp_mgr->xcp_lock); + mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr); + if (mode != xcp_mgr->mode) + dev_WARN( + xcp_mgr->adev->dev, + "Cached partition mode %d not matching with device mode %d", + xcp_mgr->mode, mode); + + mutex_unlock(&xcp_mgr->xcp_lock); + + return mode; +} + +int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, + int init_num_xcps, + struct amdgpu_xcp_mgr_funcs *xcp_funcs) +{ + struct amdgpu_xcp_mgr *xcp_mgr; + + if (!xcp_funcs || !xcp_funcs->switch_partition_mode || + !xcp_funcs->get_ip_details) + return -EINVAL; + + xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL); + + if (!xcp_mgr) + return -ENOMEM; + + xcp_mgr->adev = adev; + xcp_mgr->funcs = xcp_funcs; + xcp_mgr->mode = init_mode; + mutex_init(&xcp_mgr->xcp_lock); + + if (init_mode != AMDGPU_XCP_MODE_NONE) + __amdgpu_xcp_init(xcp_mgr, init_num_xcps); + + adev->xcp_mgr = xcp_mgr; + + return 0; +} + +int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, + enum AMDGPU_XCP_IP_BLOCK ip, int instance) +{ + struct amdgpu_xcp *xcp; + int i, id_mask = 0; + + if (ip >= AMDGPU_XCP_MAX_BLOCKS) + return -EINVAL; + + for (i = 0; i < xcp_mgr->num_xcps; ++i) { + xcp = &xcp_mgr->xcp[i]; + if ((xcp->valid) && (xcp->ip[ip].valid) && + (xcp->ip[ip].inst_mask & BIT(instance))) + id_mask |= BIT(i); + } + + if (!id_mask) + id_mask = -ENXIO; + + return id_mask; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h new file mode 100644 index 000000000000..f0b973c6092f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -0,0 +1,107 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef AMDGPU_XCP_H +#define AMDGPU_XCP_H + +#include + +#define MAX_XCP 8 + +#define AMDGPU_XCP_MODE_NONE -1 + +enum AMDGPU_XCP_IP_BLOCK { + AMDGPU_XCP_GFXHUB, + AMDGPU_XCP_GFX, + AMDGPU_XCP_SDMA, + AMDGPU_XCP_VCN, + AMDGPU_XCP_MAX_BLOCKS +}; + +enum AMDGPU_XCP_STATE { + AMDGPU_XCP_PREPARE_SUSPEND, + AMDGPU_XCP_SUSPEND, + AMDGPU_XCP_PREPARE_RESUME, + AMDGPU_XCP_RESUME, +}; + +struct amdgpu_xcp_ip_funcs { + int (*prepare_suspend)(void *handle, uint32_t inst_mask); + int (*suspend)(void *handle, uint32_t inst_mask); + int (*prepare_resume)(void *handle, uint32_t inst_mask); + int (*resume)(void *handle, uint32_t inst_mask); +}; + +struct amdgpu_xcp_ip { + struct amdgpu_xcp_ip_funcs *ip_funcs; + uint32_t inst_mask; + + enum AMDGPU_XCP_IP_BLOCK ip_id; + bool valid; +}; + +struct amdgpu_xcp { + struct amdgpu_xcp_ip ip[AMDGPU_XCP_MAX_BLOCKS]; + + uint8_t id; + uint8_t mem_node; + bool valid; +}; + +struct amdgpu_xcp_mgr { + struct amdgpu_device *adev; + struct mutex xcp_lock; + struct amdgpu_xcp_mgr_funcs *funcs; + + struct amdgpu_xcp xcp[MAX_XCP]; + uint8_t num_xcps; + int8_t mode; +}; + +struct amdgpu_xcp_mgr_funcs { + int (*switch_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr, int mode, + int *num_xcps); + int (*query_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr); + int (*get_ip_details)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + enum AMDGPU_XCP_IP_BLOCK ip_id, + struct amdgpu_xcp_ip *ip); + + int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); +}; + +int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); +int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); +int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); +int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + +int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, + int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs); +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr); +int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode); +int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, + enum AMDGPU_XCP_IP_BLOCK ip, int instance); + +#endif From 9cb18287d8f1c4549d95280e2cf60f4d1bab64f8 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 19 Sep 2022 17:34:02 +0530 Subject: [PATCH 372/861] drm/amdgpu: Add SOC partition funcs for GC v9.4.3 Switching the partition mode configuration of ASIC is SOC level function rather than something at GFX core level. Add partition mode switch functions as SOC specific callbacks. Implement the XCP manager callbacks needed for partition switch for GC 9.4.3 based ASICs. 
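As a rough standalone illustration (plain C with invented names; this is not driver code), the partition-mode arithmetic behind the new switch callback reduces to dividing the total XCC count by the number of partitions the mode requests, with CPX pinning a single XCC per partition:

#include <stdio.h>

/* Illustrative modes: the enum value doubles as the partition count,
 * except CPX, which always means one XCC per partition. */
enum part_mode { CPX = 0, SPX = 1, DPX = 2, TPX = 3, QPX = 4 };

static int xccs_per_xcp(int num_xcc, enum part_mode mode)
{
        return mode == CPX ? 1 : num_xcc / mode;
}

int main(void)
{
        printf("8 XCCs in DPX -> %d XCCs per XCP\n", xccs_per_xcp(8, DPX));
        printf("8 XCCs in CPX -> %d XCC per XCP\n", xccs_per_xcp(8, CPX));
        return 0;
}

The real callback additionally validates that the requested mode divides the available XCCs sensibly before any switch is attempted.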
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 + .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 232 ++++++++++++++++++ 2 files changed, 236 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 5adfe4277641..8be4ab50b171 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -278,8 +278,12 @@ struct amdgpu_gfx_funcs { (*query_partition_mode)(struct amdgpu_device *adev); enum amdgpu_memory_partition (*query_mem_partition_mode)(struct amdgpu_device *adev); + int (*switch_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); + + int (*switch_gfx_partition_mode)(struct amdgpu_device *adev, + int num_xccs_per_xcp); }; struct sq_work { diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 2616bdb40418..4264fbd267ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -24,6 +24,12 @@ #include "soc15.h" #include "soc15_common.h" +#include "amdgpu_xcp.h" + +#define XCP_INST_MASK(num_inst, xcp_id) \ + (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0) + +#define AMDGPU_XCP_OPS_KFD (1 << 0) void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) { @@ -119,3 +125,229 @@ u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id) return ext_offset; } + +static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + struct amdgpu_device *adev = xcp_mgr->adev; + + if (adev->nbio.funcs->get_compute_partition_mode) + mode = adev->nbio.funcs->get_compute_partition_mode(adev); + + return mode; +} + +int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +{ + int num_xcc, num_xcc_per_xcp = 0; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc; + break; + case AMDGPU_DPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + num_xcc_per_xcp = 1; + break; + } + + return num_xcc_per_xcp; +} + +int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + enum AMDGPU_XCP_IP_BLOCK ip_id, + struct amdgpu_xcp_ip *ip) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp; + int num_sdma, num_vcn; + + num_sdma = adev->sdma.num_instances; + num_vcn = adev->vcn.num_vcn_inst; + + switch (xcp_mgr->mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_sdma_xcp = num_sdma; + num_vcn_xcp = num_vcn; + break; + case AMDGPU_DPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 2; + num_vcn_xcp = num_vcn / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 3; + num_vcn_xcp = num_vcn / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 4; + num_vcn_xcp = num_vcn / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + num_sdma_xcp = 2; + num_vcn_xcp = num_vcn ? 
1 : 0; + break; + default: + return -EINVAL; + } + + num_xcc_xcp = adev->gfx.num_xcc_per_xcp; + + switch (ip_id) { + case AMDGPU_XCP_GFXHUB: + ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_GFX: + ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_SDMA: + ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_VCN: + ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + default: + return -EINVAL; + } + + ip->ip_id = ip_id; + + return 0; +} + +static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, + enum amdgpu_gfx_partition mode) +{ + int num_xcc, num_xccs_per_xcp; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + return num_xcc > 0; + case AMDGPU_DPX_PARTITION_MODE: + return (num_xcc % 4) == 0; + case AMDGPU_TPX_PARTITION_MODE: + return (num_xcc % 3) == 0; + case AMDGPU_QPX_PARTITION_MODE: + num_xccs_per_xcp = num_xcc / 4; + return (num_xccs_per_xcp >= 2); + case AMDGPU_CPX_PARTITION_MODE: + return (num_xcc > 1); + default: + return false; + } + + return false; +} + +static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + /* TODO: + * Stop user queues and threads, and make sure GPU is empty of work. + */ + + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); + + return 0; +} + +static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int ret = 0; + + if (flags & AMDGPU_XCP_OPS_KFD) { + amdgpu_amdkfd_device_probe(xcp_mgr->adev); + amdgpu_amdkfd_device_init(xcp_mgr->adev); + /* If KFD init failed, return failure */ + if (!xcp_mgr->adev->kfd.init_complete) + ret = -EIO; + } + + return ret; +} + +static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, int *num_xcps) +{ + int num_xcc_per_xcp, num_xcc, ret; + struct amdgpu_device *adev; + u32 flags = 0; + + adev = xcp_mgr->adev; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + + if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + return -EINVAL; + + if (adev->kfd.init_complete) + flags |= AMDGPU_XCP_OPS_KFD; + + if (flags & AMDGPU_XCP_OPS_KFD) { + ret = amdgpu_amdkfd_check_and_lock_kfd(adev); + if (ret) + goto out; + } + + ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags); + if (ret) + goto unlock; + + num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode); + if (adev->gfx.funcs->switch_gfx_partition_mode) + adev->gfx.funcs->switch_gfx_partition_mode(xcp_mgr->adev, + num_xcc_per_xcp); + + if (adev->nbio.funcs->set_compute_partition_mode) + adev->nbio.funcs->set_compute_partition_mode(adev, mode); + + ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); +unlock: + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_unlock_kfd(adev); +out: + return ret; +} + +int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + enum AMDGPU_XCP_IP_BLOCK ip_id, + struct amdgpu_xcp_ip *ip) +{ + if (!ip) + return -EINVAL; + + return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip); +} + +struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { + .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, + .query_partition_mode = &aqua_vanjaram_query_partition_mode, + .get_ip_details = &aqua_vanjaram_get_xcp_ip_details +}; + +static int aqua_vanjaram_xcp_mgr_init(struct 
amdgpu_device *adev) +{ + int ret; + + ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1, + &aqua_vanjaram_xcp_funcs); + if (ret) + return ret; + + /* TODO: Default memory node affinity init */ + + return ret; +} From e56c9ef6cb35f33dc83f635419ae55adf69db9fc Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 23 Sep 2022 14:43:17 +0530 Subject: [PATCH 373/861] drm/amdgpu: Add soc config init for GC9.4.3 ASICs Add function to initialize soc configuration information for GC 9.4.3 ASICs. Use it to map IPs and other SOC related information once IP configuration information is available through discovery. For GC9.4.3 compute partition related callbacks are initialized as part of configuration init. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index b602df4b445a..e6d10a3f1753 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1983,11 +1983,11 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) return 0; } -static void amdgpu_discovery_ip_map_init(struct amdgpu_device *adev) +static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev) { switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 3): - aqua_vanjaram_ip_map_init(adev); + aqua_vanjaram_init_soc_config(adev); break; default: break; @@ -2171,7 +2171,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; } - amdgpu_discovery_ip_map_init(adev); + amdgpu_discovery_init_soc_config(adev); switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 0, 1): diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 4264fbd267ae..a9de229a2828 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -351,3 +351,16 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) return ret; } + +int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) +{ + int ret; + + ret = aqua_vanjaram_xcp_mgr_init(adev); + if (ret) + return ret; + + aqua_vanjaram_ip_map_init(adev); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index dd48db09aa51..eac54042c6c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -113,6 +113,7 @@ int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); +int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); From 8e7fd19380f9187dae3ad18a61793b1752dfa097 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 16 Nov 2022 17:15:47 +0530 Subject: [PATCH 374/861] drm/amdgpu: Switch to SOC partition funcs For GFXv9.4.3, use SOC level partition switch implementation rather than keeping them at GFX IP level. 
Change the existing implementation in GFX IP for keeping partition mode and restrict it to only GFX related switch. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 31 ++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 5 -- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 59 +++---------------- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 ++- 5 files changed, 20 insertions(+), 87 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 5ff49737d7c6..f895a4b8ca0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -28,6 +28,7 @@ #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" #include "amdgpu_ras.h" +#include "amdgpu_xcp.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -1170,10 +1171,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - enum amdgpu_gfx_partition mode; + int mode; char *partition_mode; - mode = adev->gfx.funcs->query_partition_mode(adev); + mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: @@ -1254,31 +1255,7 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return -EINVAL; } - if (!adev->kfd.init_complete) - return -EPERM; - - mutex_lock(&adev->gfx.partition_mutex); - - if (mode == adev->gfx.funcs->query_partition_mode(adev)) - goto out; - - ret = amdgpu_amdkfd_check_and_lock_kfd(adev); - if (ret) - goto out; - - amdgpu_amdkfd_device_fini_sw(adev); - - adev->gfx.funcs->switch_partition_mode(adev, mode); - - amdgpu_amdkfd_device_probe(adev); - amdgpu_amdkfd_device_init(adev); - /* If KFD init failed, return failure */ - if (!adev->kfd.init_complete) - ret = -EIO; - - amdgpu_amdkfd_unlock_kfd(adev); -out: - mutex_unlock(&adev->gfx.partition_mutex); + ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8be4ab50b171..2287768ed141 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -278,11 +278,7 @@ struct amdgpu_gfx_funcs { (*query_partition_mode)(struct amdgpu_device *adev); enum amdgpu_memory_partition (*query_mem_partition_mode)(struct amdgpu_device *adev); - int (*switch_partition_mode)(struct amdgpu_device *adev, - enum amdgpu_gfx_partition mode); - - int (*switch_gfx_partition_mode)(struct amdgpu_device *adev, + int (*switch_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); }; @@ -416,7 +412,6 @@ struct amdgpu_gfx { bool cp_gfx_shadow; /* for gfx11 */ - enum amdgpu_gfx_partition partition_mode; uint16_t xcc_mask; enum amdgpu_memory_partition mem_partition_mode; uint32_t num_xcc_per_xcp; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index a9de229a2828..bbcdececfd2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -307,8 +307,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, goto unlock; num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode); - if (adev->gfx.funcs->switch_gfx_partition_mode) - adev->gfx.funcs->switch_gfx_partition_mode(xcp_mgr->adev, + if
(adev->gfx.funcs->switch_partition_mode) + adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev, num_xcc_per_xcp); if (adev->nbio.funcs->set_compute_partition_mode) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 93a0baa4515c..d684037a7a5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -38,6 +38,7 @@ #include "gc/gc_9_4_3_sh_mask.h" #include "gfx_v9_4_3.h" +#include "amdgpu_xcp.h" MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); @@ -614,61 +615,23 @@ gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev) return mode; } -static enum amdgpu_gfx_partition -gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) -{ - enum amdgpu_gfx_partition mode = adev->gfx.partition_mode; - - if (adev->nbio.funcs->get_compute_partition_mode) - mode = adev->nbio.funcs->get_compute_partition_mode(adev); - - return mode; -} - static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, - enum amdgpu_gfx_partition mode) + int num_xccs_per_xcp) { + int i, num_xcc; u32 tmp = 0; - int num_xcc_per_partition, i, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - switch (mode) { - case AMDGPU_SPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc; - break; - case AMDGPU_DPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 2; - break; - case AMDGPU_TPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 3; - break; - case AMDGPU_QPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 4; - break; - case AMDGPU_CPX_PARTITION_MODE: - num_xcc_per_partition = 1; - break; - default: - return -EINVAL; - } - - /* TODO: - * Stop user queues and threads, and make sure GPU is empty of work. - */ for (i = 0; i < num_xcc; i++) { tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, - num_xcc_per_partition); + num_xccs_per_xcp); tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, - i % num_xcc_per_partition); + i % num_xccs_per_xcp); WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp); } - if (adev->nbio.funcs->set_compute_partition_mode) - adev->nbio.funcs->set_compute_partition_mode(adev, mode); - - adev->gfx.num_xcc_per_xcp = num_xcc_per_partition; - adev->gfx.partition_mode = mode; + adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp; return 0; } @@ -680,7 +643,6 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, - .query_partition_mode = &gfx_v9_4_3_query_compute_partition, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition, }; @@ -1899,10 +1861,6 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) return r; } - if (adev->gfx.partition_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) - gfx_v9_4_3_switch_compute_partition(adev, - amdgpu_user_partt_mode); - /* set the virtual and physical id based on partition_mode */ gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id); @@ -1931,6 +1889,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { int r, i, num_xcc; + if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode); + num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { r = gfx_v9_4_3_xcc_cp_resume(adev, i); @@ -2146,8 +2107,6 @@ static int 
gfx_v9_4_3_early_init(void *handle) num_xcc = NUM_XCC(adev->gfx.xcc_mask); - adev->gfx.partition_mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; - adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_4_3_set_kiq_pm4_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 293787290e36..7a963d0a34e2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -34,6 +34,7 @@ #include "kfd_smi_events.h" #include "kfd_migrate.h" #include "amdgpu.h" +#include "amdgpu_xcp.h" #define MQD_SIZE_ALIGNED 768 @@ -592,7 +593,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; - int num_xcd; + int num_xcd, partition_mode; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -644,8 +645,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * If the VMID range changes for GFX9.4.3, then this code MUST be * revisited. */ + partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr); if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { vmid_num_kfd /= 2; first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; @@ -761,7 +763,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->start_xcc_id = node->num_xcc_per_node * i; if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { /* For GFX9.4.3 and CPX mode, first XCD gets VMID range * 4-9 and second XCD gets VMID range 10-15. From dfdd6f89c4c7f3315d9cabd9bf80a8174ecb5753 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 23 Sep 2022 15:20:08 +0530 Subject: [PATCH 375/861] drm/amdgpu: Add GFXHUB v1.2 XCP funcs Add functions required for suspend/resume of GFXHUB instances which are part of an XCP. 
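As a standalone sketch of the pattern these hooks follow (invented names, not the kernel helpers), a per-partition instance mask selects exactly which hub instances a suspend or resume call touches:

#include <stdint.h>
#include <stdio.h>

/* Visit each set bit of an instance mask, the way an XCP hook acts
 * only on the instances owned by one partition. */
static void xcp_gart_disable_sketch(uint32_t inst_mask)
{
        int i;

        for (i = 0; inst_mask; inst_mask >>= 1, i++) {
                if (inst_mask & 1)
                        printf("gfxhub instance %d: disable GART\n", i);
        }
}

int main(void)
{
        xcp_gart_disable_sketch(0x3); /* instances 0 and 1 of one XCP */
        return 0;
}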
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 35 ++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index b9316bfb7263..8ba59ffe0e9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -21,6 +21,7 @@ * */ #include "amdgpu.h" +#include "amdgpu_xcp.h" #include "gfxhub_v1_2.h" #include "gfxhub_v1_1.h" @@ -638,3 +639,37 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .init = gfxhub_v1_2_init, .get_xgmi_info = gfxhub_v1_2_get_xgmi_info, }; + +static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool value; + int ret; + + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) + value = false; + else + value = true; + + gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, inst_mask); + + if (!amdgpu_sriov_vf(adev)) + ret = gfxhub_v1_2_xcc_gart_enable(adev, inst_mask); + + return ret; +} + +static int gfxhub_v1_2_xcp_suspend(void *handle, uint32_t inst_mask) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!amdgpu_sriov_vf(adev)) + gfxhub_v1_2_xcc_gart_disable(adev, inst_mask); + + return 0; +} + +struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs = { + .suspend = &gfxhub_v1_2_xcp_suspend, + .resume = &gfxhub_v1_2_xcp_resume +}; From 3446cb78f3a8033fda416825282e7cafecc83129 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 23 Sep 2022 15:40:15 +0530 Subject: [PATCH 376/861] drm/amdgpu: Add SDMA v4.4.2 XCP funcs Add functions required to suspend/resume instances of SDMA which are part of an XCP. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 36 ++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 6a971e15b4b0..184eb7902722 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -27,6 +27,7 @@ #include #include "amdgpu.h" +#include "amdgpu_xcp.h" #include "amdgpu_ucode.h" #include "amdgpu_trace.h" @@ -2025,3 +2026,38 @@ const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = { .rev = 0, .funcs = &sdma_v4_4_2_ip_funcs, }; + +static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + if (!amdgpu_sriov_vf(adev)) + sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask); + + r = sdma_v4_4_2_inst_start(adev, inst_mask); + + return r; +} + +static int sdma_v4_4_2_xcp_suspend(void *handle, uint32_t inst_mask) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t tmp_mask = inst_mask; + int i; + + for_each_inst(i, tmp_mask) { + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); + } + + sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask); + sdma_v4_4_2_inst_enable(adev, false, inst_mask); + + return 0; +} + +struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs = { + .suspend = &sdma_v4_4_2_xcp_suspend, + .resume = &sdma_v4_4_2_xcp_resume +}; From 73c84f7c478aeb98bce494cac73f2d20f4a81c6e Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 23 Sep 2022 16:48:43 +0530 Subject: [PATCH 377/861] drm/amdgpu: Add XCP functions for GFX v9.4.3 Add functions to suspend/resume GFX instances 
belonging to an XCP. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 47 +++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index d684037a7a5d..aaa67592bbb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -23,6 +23,7 @@ #include #include "amdgpu.h" +#include "amdgpu_xcp.h" #include "amdgpu_gfx.h" #include "soc15.h" #include "soc15d.h" @@ -3177,3 +3178,49 @@ const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = { .rev = 0, .funcs = &gfx_v9_4_3_ip_funcs, }; + +static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t tmp_mask; + int i, r; + + /* TODO : Initialize golden regs */ + /* gfx_v9_4_3_init_golden_registers(adev); */ + + tmp_mask = inst_mask; + for_each_inst(i, tmp_mask) + gfx_v9_4_3_xcc_constants_init(adev, i); + + tmp_mask = inst_mask; + for_each_inst(i, tmp_mask) { + r = gfx_v9_4_3_xcc_rlc_resume(adev, i); + if (r) + return r; + } + + tmp_mask = inst_mask; + for_each_inst(i, tmp_mask) { + r = gfx_v9_4_3_xcc_cp_resume(adev, i); + if (r) + return r; + } + + return 0; +} + +static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + for_each_inst(i, inst_mask) + gfx_v9_4_3_xcc_fini(adev, i); + + return 0; +} + +struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = { + .suspend = &gfx_v9_4_3_xcp_suspend, + .resume = &gfx_v9_4_3_xcp_resume +}; From 845c9b313f3122191c847fcc4092492ce039542a Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 23 Sep 2022 17:15:15 +0530 Subject: [PATCH 378/861] drm/amdgpu: Add XCP IP callback funcs for each IP Initialize with the IP specific functions needed for GFXHUB, GFX and SDMA. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 9 ++++++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h | 2 ++ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h | 2 ++ drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h | 2 ++ 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index bbcdececfd2f..2cbac0bccd80 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -25,6 +25,9 @@ #include "soc15_common.h" #include "amdgpu_xcp.h" +#include "gfx_v9_4_3.h" +#include "gfxhub_v1_2.h" +#include "sdma_v4_4_2.h" #define XCP_INST_MASK(num_inst, xcp_id) \ (num_inst ? 
GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0) @@ -205,15 +208,15 @@ int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, switch (ip_id) { case AMDGPU_XCP_GFXHUB: ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); - /* TODO : Assign IP funcs */ + ip->ip_funcs = &gfxhub_v1_2_xcp_funcs; break; case AMDGPU_XCP_GFX: ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); - /* TODO : Assign IP funcs */ + ip->ip_funcs = &gfx_v9_4_3_xcp_funcs; break; case AMDGPU_XCP_SDMA: ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id); - /* TODO : Assign IP funcs */ + ip->ip_funcs = &sdma_v4_4_2_xcp_funcs; break; case AMDGPU_XCP_VCN: ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h index 4b530f4c1295..42d67ee0e7ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h @@ -26,4 +26,6 @@ extern const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block; +extern struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs; + #endif /* __GFX_V9_4_3_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h index e2d508f5a7ee..997e9f90c990 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h @@ -26,4 +26,6 @@ extern const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs; +extern struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs; + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h index 4814e8a074d6..d516145529bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h @@ -27,4 +27,6 @@ extern const struct amd_ip_funcs sdma_v4_4_2_ip_funcs; extern const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block; +extern struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs; + #endif From 5b03127d4745d6848f208463390e6a76d489eb03 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 24 Nov 2022 14:23:58 +0530 Subject: [PATCH 379/861] drm/amdgpu: Skip TMR allocation if not required On ASICs with PSPv13.0.6, TMR is reserved at boot time. There is no need to allocate TMR region by driver. However, it's still required to send SETUP_TMR command to PSP. 
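The resulting flow can be pictured with a standalone sketch (made-up helper names, not the actual PSP code paths): the allocation step becomes conditional on the PSP version, while the SETUP_TMR submission still always happens:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's IP-version check. */
static bool boottime_tmr(int maj, int min, int rev)
{
        return maj == 13 && min == 0 && rev == 6; /* PSP v13.0.6 */
}

static void hw_start_sketch(int maj, int min, int rev)
{
        if (!boottime_tmr(maj, min, rev))
                printf("tmr_init: driver reserves the TMR region\n");
        printf("tmr_load: send SETUP_TMR (zero-sized if boot-time TMR)\n");
}

int main(void)
{
        hw_start_sketch(13, 0, 6);
        return 0;
}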
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 34 +++++++++++++++++++------ 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 1319df5796cc..863fa331e6ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -703,8 +703,13 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp, uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) { struct amdgpu_device *adev = psp->adev; - uint32_t size = amdgpu_bo_size(tmr_bo); - uint64_t tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); + uint32_t size = 0; + uint64_t tmr_pa = 0; + + if (tmr_bo) { + size = amdgpu_bo_size(tmr_bo); + tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); + } if (amdgpu_sriov_vf(psp->adev)) cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; @@ -749,6 +754,16 @@ static int psp_load_toc(struct psp_context *psp, return ret; } +static bool psp_boottime_tmr(struct psp_context *psp) +{ + switch (psp->adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 6): + return true; + default: + return false; + } +} + /* Set up Trusted Memory Region */ static int psp_tmr_init(struct psp_context *psp) { @@ -820,8 +835,9 @@ static int psp_tmr_load(struct psp_context *psp) cmd = acquire_psp_cmd_buf(psp); psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); - DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", - amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); + if (psp->tmr_bo) + DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", + amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); @@ -2080,10 +2096,12 @@ static int psp_hw_start(struct psp_context *psp) if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) goto skip_pin_bo; - ret = psp_tmr_init(psp); - if (ret) { - DRM_ERROR("PSP tmr init failed!\n"); - return ret; + if (!psp_boottime_tmr(psp)) { + ret = psp_tmr_init(psp); + if (ret) { + DRM_ERROR("PSP tmr init failed!\n"); + return ret; + } } skip_pin_bo: From fee500fa7cb7e11a4d2d66e75e65e67c156e27c6 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Fri, 18 Nov 2022 14:21:15 +0800 Subject: [PATCH 380/861] drm/amdgpu: Fix the KCQ hang when binding back Just like the KIQ, the KCQ needs to clear the doorbell related regs as well to avoid hangs when the driver is loaded again after unloading.
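Conceptually (standalone sketch with invented names), the fix extends the per-queue teardown that the KIQ already received to every kernel compute queue on an XCC:

#include <stdio.h>

#define NUM_COMPUTE_RINGS 4

/* Per-queue teardown: request a dequeue, then clear the doorbell
 * registers so the next driver load starts from a clean state. */
static void q_fini_register_sketch(int xcc_id, int ring)
{
        printf("xcc %d ring %d: dequeue + clear doorbell regs\n",
               xcc_id, ring);
}

int main(void)
{
        int j;

        for (j = 0; j < NUM_COMPUTE_RINGS; j++)
                q_fini_register_sketch(0, j);
        return 0;
}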
Signed-off-by: Shiwu Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 28 ++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index aaa67592bbb5..ef552c9b19b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1670,7 +1670,7 @@ static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring, return 0; } -static int gfx_v9_4_3_xcc_kiq_fini_register(struct amdgpu_ring *ring, +static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring, int xcc_id) { struct amdgpu_device *adev = ring->adev; @@ -1688,7 +1688,7 @@ static int gfx_v9_4_3_xcc_kiq_fini_register(struct amdgpu_ring *ring, } if (j == AMDGPU_MAX_USEC_TIMEOUT) { - DRM_DEBUG("KIQ dequeue request failed.\n"); + DRM_DEBUG("%s dequeue request failed.\n", ring->name); /* Manual disable if dequeue request times out */ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0); @@ -1793,6 +1793,27 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) return 0; } +static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id) +{ + struct amdgpu_ring *ring; + int j; + + for (j = 0; j < adev->gfx.num_compute_rings; j++) { + ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings]; + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, ring->me, + ring->pipe, + ring->queue, 0, GET_INST(GC, xcc_id)); + gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); + mutex_unlock(&adev->srbm_mutex); + } + } + + return 0; +} + static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring; @@ -1923,12 +1944,13 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) adev->gfx.kiq[xcc_id].ring.pipe, adev->gfx.kiq[xcc_id].ring.queue, 0, GET_INST(GC, xcc_id)); - gfx_v9_4_3_xcc_kiq_fini_register(&adev->gfx.kiq[xcc_id].ring, + gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring, xcc_id); soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); } + gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id); gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id); /* Skip suspend with A+A reset */ From dc6df2095deaaefe38a94d62a51b0d07c0794eaf Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 28 Nov 2022 09:57:51 +0530 Subject: [PATCH 381/861] drm/amdgpu: Move generic logic to soc config Move soc specific configuration details to aqua vanjaram specific file. 
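A standalone illustration of the bookkeeping that lands here (invented names; the real code reads the discovered instance masks): the per-IP counts are derived from a mask and a fixed instances-per-AID ratio:

#include <stdint.h>
#include <stdio.h>

static int popcount32(uint32_t v)
{
        int n = 0;

        for (; v; v &= v - 1) /* clear lowest set bit each pass */
                n++;
        return n;
}

int main(void)
{
        uint32_t sdma_mask = 0xff; /* e.g. 8 discovered SDMA instances */
        int sdma_per_aid = 4;      /* one AID generally carries 4 */
        int num_sdma = popcount32(sdma_mask);

        printf("%d SDMA instances, %d per AID -> %d AIDs\n",
               num_sdma, sdma_per_aid, num_sdma / sdma_per_aid);
        return 0;
}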
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 2cbac0bccd80..a6204b588829 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -359,6 +359,15 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { int ret; + /* generally 1 AID supports 4 instances */ + adev->sdma.num_inst_per_aid = 4; + adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask); + + adev->vcn.num_inst_per_aid = 1; + adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * adev->num_aid; + adev->jpeg.num_inst_per_aid = 1; + adev->jpeg.num_jpeg_inst = adev->jpeg.num_inst_per_aid * adev->num_aid; + ret = aqua_vanjaram_xcp_mgr_init(adev); if (ret) return ret; From bbca579fd2ea8cbc170df33587f8a4b572a4f025 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 28 Nov 2022 11:17:15 +0530 Subject: [PATCH 382/861] drm/amdgpu: Derive active clusters from SDMA SDMA instances per active cluster and SDMA instance mask are used to find the number of active clusters. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index a6204b588829..6f7226b5d446 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -357,12 +357,22 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { + u32 inst_mask = adev->sdma.sdma_mask; int ret; /* generally 1 AID supports 4 instances */ adev->sdma.num_inst_per_aid = 4; adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask); + adev->num_aid = 1; + inst_mask >>= adev->sdma.num_inst_per_aid; + + for (const u32 mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; + inst_mask >>= adev->sdma.num_inst_per_aid) { + if ((inst_mask & mask) == mask) + adev->num_aid++; + } + adev->vcn.num_inst_per_aid = 1; adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * adev->num_aid; adev->jpeg.num_inst_per_aid = 1; From 7a1efad04c210594069c4ab9f9c25039cd6915e4 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 29 Nov 2022 14:00:37 +0530 Subject: [PATCH 383/861] drm/amdgpu: Use mask for active clusters Use a mask of available active clusters instead of using only the number of active clusters. 
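The derivation mirrors the loop in the diff below and can be tried standalone (illustrative names): AID0 is always marked present, and every further fully populated group of SDMA instances sets another bit of the mask:

#include <stdint.h>
#include <stdio.h>

static uint32_t derive_aid_mask(uint32_t sdma_mask, int inst_per_aid)
{
        const uint32_t group = (1u << inst_per_aid) - 1;
        uint32_t aid_mask = 1; /* AID0 is always present */
        int i = 1;

        for (sdma_mask >>= inst_per_aid; sdma_mask;
             sdma_mask >>= inst_per_aid, i++) {
                if ((sdma_mask & group) == group)
                        aid_mask |= 1u << i;
        }
        return aid_mask;
}

int main(void)
{
        /* 16 SDMA instances at 4 per AID -> AIDs 0-3 -> mask 0xf */
        printf("aid_mask = 0x%x\n", derive_aid_mask(0xffff, 4));
        return 0;
}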
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 13 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 56 ++++++++++++------- 4 files changed, 49 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3858d29baef1..279057ec7a0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1070,7 +1070,8 @@ struct amdgpu_device { bool job_hang; bool dc_enabled; - uint32_t num_aid; + /* Mask of active clusters */ + uint32_t aid_mask; }; static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 6f7226b5d446..0d7bc212def1 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -358,25 +358,26 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { u32 inst_mask = adev->sdma.sdma_mask; - int ret; + int ret, i, num_inst; /* generally 1 AID supports 4 instances */ adev->sdma.num_inst_per_aid = 4; adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask); - adev->num_aid = 1; + adev->aid_mask = i = 1; inst_mask >>= adev->sdma.num_inst_per_aid; for (const u32 mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; - inst_mask >>= adev->sdma.num_inst_per_aid) { + inst_mask >>= adev->sdma.num_inst_per_aid, ++i) { if ((inst_mask & mask) == mask) - adev->num_aid++; + adev->aid_mask |= (1 << i); } + num_inst = hweight32(adev->aid_mask); adev->vcn.num_inst_per_aid = 1; - adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * adev->num_aid; + adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * num_inst; adev->jpeg.num_inst_per_aid = 1; - adev->jpeg.num_jpeg_inst = adev->jpeg.num_inst_per_aid * adev->num_aid; + adev->jpeg.num_jpeg_inst = adev->jpeg.num_inst_per_aid * num_inst; ret = aqua_vanjaram_xcp_mgr_init(adev); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0792c48fe347..b3f64f2f306d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1674,6 +1674,7 @@ static int gmc_v9_0_sw_init(void *handle) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + unsigned long inst_mask = adev->aid_mask; adev->gfxhub.funcs->init(adev); @@ -1757,7 +1758,9 @@ static int gmc_v9_0_sw_init(void *handle) case IP_VERSION(9, 4, 3): bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), NUM_XCC(adev->gfx.xcc_mask)); - bitmap_set(adev->vmhubs_mask, AMDGPU_MMHUB0(0), adev->num_aid); + + inst_mask <<= AMDGPU_MMHUB0(0); + bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32); amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 6f469b9aa9a0..a530e2a3cc28 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -54,9 +54,11 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi uint64_t page_table_base) { struct amdgpu_vmhub *hub; + u32 inst_mask; int i; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = 
adev->aid_mask; + for_each_inst(i, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; WREG32_SOC15_OFFSET(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -73,6 +75,7 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) { uint64_t pt_base; + u32 inst_mask; int i; if (adev->gmc.pdb0_bo) @@ -85,7 +88,8 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) /* If use GART for FB translation, vmid0 page table covers both * vram and system memory (gart) */ - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { if (adev->gmc.pdb0_bo) { WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -121,11 +125,12 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) { + uint32_t tmp, inst_mask; uint64_t value; - uint32_t tmp; int i; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { /* Program the AGP BAR */ WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0); WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, @@ -183,11 +188,12 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, inst_mask; int i; /* Setup TLB control */ - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, @@ -208,14 +214,15 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, inst_mask; int i; if (amdgpu_sriov_vf(adev)) return; /* Setup L2 cache */ - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, @@ -267,10 +274,11 @@ static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, inst_mask; int i; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, @@ -286,12 +294,14 @@ static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev) static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) { + u32 inst_mask; int i; if (amdgpu_sriov_vf(adev)) return; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { WREG32_SOC15(MMHUB, i, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0XFFFFFFFF); @@ -317,7 +327,7 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; unsigned num_level, block_size; - uint32_t tmp; + uint32_t tmp, inst_mask; int i, j; num_level = adev->vm_manager.num_level; @@ -327,7 +337,8 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) else block_size -= 9; - for (j = 0; j < adev->num_aid; j++) { + inst_mask = adev->aid_mask; + 
for_each_inst(j, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL, @@ -382,9 +393,10 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - unsigned i, j; + u32 i, j, inst_mask; - for (j = 0; j < adev->num_aid; j++) { + inst_mask = adev->aid_mask; + for_each_inst(j, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; for (i = 0; i < 18; ++i) { WREG32_SOC15_OFFSET(MMHUB, j, @@ -429,10 +441,11 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; u32 tmp; - u32 i, j; + u32 i, j, inst_mask; /* Disable all tables */ - for (j = 0; j < adev->num_aid; j++) { + inst_mask = adev->aid_mask; + for_each_inst(j, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; for (i = 0; i < 16; i++) WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL, @@ -465,13 +478,14 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) */ static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value) { - u32 tmp; + u32 tmp, inst_mask; int i; if (amdgpu_sriov_vf(adev)) return; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -510,9 +524,11 @@ static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v1_8_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; + u32 inst_mask; int i; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i, From 753b999afe47900531282f86bf430aec250b4232 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Wed, 7 Dec 2022 00:29:40 -0500 Subject: [PATCH 384/861] drm/amdgpu: set MTYPE in PTE for GFXIP 9.4.3 Apply the GFXIP 9.4.3 specific snoop and mtype settings for various scenarios such as APU, APU in Carveout mode and dGPU mode. Note: This is expected to change due to: 1 - NPS > 1 support in future 2 - Hardware bugs found during initial asic bringup. Cc: Graham Sider Cc: Hawking Zhang Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 40 ++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b3f64f2f306d..3765178e6fc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1173,7 +1173,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): - case IP_VERSION(9, 4, 3): if (is_vram) { if (bo_adev == adev) { if (uncached) @@ -1207,6 +1206,45 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, */ snoop = true; } + break; + case IP_VERSION(9, 4, 3): + /* FIXME: Needs more work for handling multiple memory + * partitions (> NPS1 mode) e.g. NPS4 for both APU and dGPU + * modes. + */ + snoop = true; + if (uncached) { + mtype = MTYPE_UC; + } else if (adev->gmc.is_app_apu) { + /* FIXME: APU in native mode, NPS1 single socket only + * + * For suporting NUMA partitioned APU e.g. 
in NPS4 mode, + * this need to look at the NUMA node on which the + * system memory allocation was done. + * + * Memory access by a different partition within same + * socket should be treated as remote access so MTYPE_RW + * cannot be used always. + */ + mtype = MTYPE_RW; + } else if (adev->flags & AMD_IS_APU) { + /* APU on carve out mode */ + mtype = MTYPE_RW; + } else { + /* dGPU */ + /* + if ((mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) && + bo_adev == adev) + mapping_flags |= AMDGPU_VM_MTYPE_RW; + else + */ + /* Temporarily comment out above lines and use MTYPE_NC + * on both VRAM and system memory access until + * MTYPE_RW can properly work on VRAM access + */ + mtype = MTYPE_NC; + } + break; default: if (uncached || coherent) From 1794e9d7e78cb52605234d0ddc3f46084937f4e9 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Thu, 8 Dec 2022 12:08:17 -0500 Subject: [PATCH 385/861] drm/amdkfd: Update packet manager for GFX9.4.3 In GFX 9.4.3, there can be more than 8 SDMA engines. As a result, extended_engine_sel and engine_sel fields in MAP_QUEUES packet need to be updated to allow correct mapping of SDMA queues to these SDMA engines. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 16 +++++++++++++--- drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 3 ++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 54d7d4665ad2..44cf3a5f6fdb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -225,9 +225,19 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, packet->bitfields2.engine_sel = q->properties.sdma_engine_id + engine_sel__mes_map_queues__sdma0_vi; else { - packet->bitfields2.extended_engine_sel = - extended_engine_sel__mes_map_queues__sdma0_to_7_sel; - packet->bitfields2.engine_sel = q->properties.sdma_engine_id; + /* + * For GFX9.4.3, SDMA engine id can be greater than 8. + * For such cases, set extended_engine_sel to 2 and + * ensure engine_sel lies between 0-7. + */ + if (q->properties.sdma_engine_id >= 8) + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_map_queues__sdma8_to_15_sel; + else + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_map_queues__sdma0_to_7_sel; + + packet->bitfields2.engine_sel = q->properties.sdma_engine_id % 8; } break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index a666710ed403..2ad708c64012 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -263,7 +263,8 @@ enum mes_map_queues_engine_sel_enum { enum mes_map_queues_extended_engine_sel_enum { extended_engine_sel__mes_map_queues__legacy_engine_sel = 0, - extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1 + extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1, + extended_engine_sel__mes_map_queues__sdma8_to_15_sel = 2 }; struct pm4_mes_map_queues { From 98b2e9cad2279132e3aa4b9caf9164b2e35c1a52 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 9 Dec 2022 19:44:05 +0800 Subject: [PATCH 386/861] drm/amdgpu: correct the vmhub index when page fault occurs The AMDGPU_GFXHUB is bound to each xcc in logical order. Thus convert the node_id to a logical xcc_id to index the correct AMDGPU_GFXHUB.
And "node_id / 4" can get the correct AMDGPU_MMHUB0 index. Signed-off-by: Le Ma Tested-by: Asad kamal Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 27 +++++++++++++------------ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 +++++++++---- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 2287768ed141..81b4c7e684af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -280,6 +280,7 @@ struct amdgpu_gfx_funcs { (*query_mem_partition_mode)(struct amdgpu_device *adev); int (*switch_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); + int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node); }; struct sq_work { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index ef552c9b19b5..6aaa810ea044 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -637,6 +637,19 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, return 0; } +static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) +{ + int xcc; + + xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0)); + if (!xcc) { + dev_err(adev->dev, "Couldn't find xcc mapping from IH node"); + return -EINVAL; + } + + return xcc - 1; +} + static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh, @@ -646,6 +659,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition, + .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, }; static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) @@ -2754,19 +2768,6 @@ static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, return 0; } -static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) -{ - int xcc; - - xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0)); - if (!xcc) { - dev_err(adev->dev, "Couldn't find xcc mapping from IH node"); - return -EINVAL; - } - - return xcc - 1; -} - static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3765178e6fc5..841333148610 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -557,22 +557,28 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, u64 addr; uint32_t cam_index = 0; int ret; - uint32_t node_id; + uint32_t node_id, xcc_id = 0; - node_id = (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 
entry->node_id : 0; + node_id = entry->node_id; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; if (entry->client_id == SOC15_IH_CLIENTID_VMC) { hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)]; } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { hub_name = "mmhub1"; hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; } else { hub_name = "gfxhub0"; - hub = &adev->vmhub[node_id/2]; + if (adev->gfx.funcs->ih_node_to_logical_xcc) { + xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev, + node_id); + if (xcc_id < 0) + xcc_id = 0; + } + hub = &adev->vmhub[xcc_id]; } if (retry_fault) { From a820d3ca8ed3a2851e1c9d5713e807f84e88019d Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 1 Dec 2022 17:22:01 +0530 Subject: [PATCH 387/861] drm/amdgpu: Remove unnecessary return value check There is no need to check return value, as the function internally used - amdgpu_discovery_read_binary_from_vram() - returns void. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index e6d10a3f1753..5b6088fc2b2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -201,14 +201,13 @@ static int hw_id_map[MAX_HWIP] = { [PCIE_HWIP] = PCIE_HWID, }; -static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) +static void amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) { uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, adev->mman.discovery_tmr_size, false); - return 0; } static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) @@ -302,12 +301,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (!adev->mman.discovery_bin) return -ENOMEM; - r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); - if (r) { - dev_err(adev->dev, "failed to read ip discovery binary from vram\n"); - r = -EINVAL; - goto out; - } + amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) { /* ignore the discovery binary from vram if discovery=2 in kernel module parameter */ From c2d43918a14f7b0f04932f5a45728e0fe8161da0 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 9 Dec 2022 09:03:01 -0500 Subject: [PATCH 388/861] drm/amdkfd: Setup current_logical_xcc_id in MQD Setup rolling current_logical_xcc_id in MQD for GFX9.4.3 to ensure each queue starts at a different place and prevent hotspotting issues. Also, remove updating current_logical_xcc_id during queue update. 
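For illustration, a minimal stand-alone sketch of the rolling-start arithmetic described above (plain C; the counter and per-node XCC count stand in for dqm->current_logical_xcc_start and num_xcc_per_node, so the names are illustrative, not the driver's):

#include <stdio.h>

/* Illustrative model only: each new queue samples and advances a shared
 * counter, and every XCC copy of its MQD starts at a different logical
 * XCC id, so consecutive queues do not all begin on XCC 0. */
int main(void)
{
	unsigned int current_logical_xcc_start = 0; /* per-dqm counter */
	unsigned int num_xcc_per_node = 4;

	for (unsigned int q = 0; q < 3; q++) {
		unsigned int local_xcc_start = current_logical_xcc_start++;

		for (unsigned int xcc = 0; xcc < num_xcc_per_node; xcc++)
			printf("queue %u, xcc %u -> logical xcc id %u\n", q, xcc,
			       (local_xcc_start + xcc) % num_xcc_per_node);
	}
	return 0;
}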
Suggested-by: Joseph Greathouse Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 +++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 13 +++++-------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index b11c474d4067..cd4383bb207f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -256,6 +256,9 @@ struct device_queue_manager { struct work_struct hw_exception_work; struct kfd_mem_obj hiq_sdma_mqd; bool sched_running; + + /* used for GFX 9.4.3 only */ + uint32_t current_logical_xcc_start; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 2e2a0f8586f7..c781314b213c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -32,6 +32,7 @@ #include "gc/gc_9_0_sh_mask.h" #include "sdma0/sdma0_4_0_sh_mask.h" #include "amdgpu_amdkfd.h" +#include "kfd_device_queue_manager.h" static void update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q, @@ -569,6 +570,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, uint64_t xcc_gart_addr = 0; uint64_t xcc_ctx_save_restore_area_address; uint64_t offset = mm->mqd_stride(mm, q); + uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++; memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { @@ -596,18 +598,17 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, if (q->format == KFD_QUEUE_FORMAT_AQL) { m->compute_tg_chunk_size = 1; + m->compute_current_logic_xcc_id = + (local_xcc_start + xcc) % + mm->dev->num_xcc_per_node; switch (xcc) { case 0: /* Master XCC */ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; - m->compute_current_logic_xcc_id = - mm->dev->num_xcc_per_node - 1; break; default: - m->compute_current_logic_xcc_id = - xcc - 1; break; } } else { @@ -642,12 +643,8 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, /* Master XCC */ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; - m->compute_current_logic_xcc_id = - mm->dev->num_xcc_per_node - 1; break; default: - m->compute_current_logic_xcc_id = - xcc - 1; break; } m->compute_tg_chunk_size = 1; From a0a0c69c05bff025abf49ec66b2bfb94aeabcc6e Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 14 Dec 2022 10:28:50 +0530 Subject: [PATCH 389/861] drm/amdgpu: Fix semaphore release Use the right register for semaphore release during invalidation. 
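For context, a hedged sketch of the invalidation-semaphore protocol the fix below restores (register names follow the gmc_v9_0 diff; the helper itself is illustrative):

/* Acquire: poll vm_inv_eng0_sem + eng_distance * eng until it reads 1.
 * Release: write 0 to that same per-engine semaphore register. The bug
 * was that the release wrote to vm_inv_eng0_req (the request register)
 * instead, leaving the semaphore held. */
static unsigned int inv_eng_sem_offset(unsigned int vm_inv_eng0_sem,
				       unsigned int eng_distance,
				       unsigned int eng)
{
	return vm_inv_eng0_sem + eng_distance * eng; /* per-engine slot */
}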
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 841333148610..1e4364120845 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -915,9 +915,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * write with 0 means semaphore release */ if (vmhub >= AMDGPU_MMHUB0(0)) - WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); else - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); } spin_unlock(&adev->gmc.invalidate_lock); From 12c4d7edfb7238ded6c7a2584995d888b4d877ec Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 15 Dec 2022 13:13:29 +0530 Subject: [PATCH 390/861] drm/amdgpu: Fix GFX 9.4.3 dma address capability MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ASICs with GFX 9.4.3 support 48-bit addressing. Signed-off-by: Lijo Lazar Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 1e4364120845..444441c6b7e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1846,7 +1846,7 @@ static int gmc_v9_0_sw_init(void *handle) */ adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ - dma_addr_bits = adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ? 48:44; + dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44; r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); if (r) { printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); From d524180b88009d9158bff7fd20f3916455e0c32c Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 19 Dec 2022 17:39:42 +0530 Subject: [PATCH 391/861] drm/amdgpu: Fix GFX v9.4.3 EOP buffer allocation Each compute cluster gets 8 compute queues in GFX v9.4.3. Fix the EOP buffer allocation so that compute queue on every XCC gets a unique address. 
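A back-of-the-envelope check of the new sizing (GFX9_MEC_HPD_SIZE is 4096 bytes in the gfx v9 code; the ring and XCC counts here are just example values):

#include <stdio.h>

#define GFX9_MEC_HPD_SIZE 4096 /* EOP buffer bytes per compute queue */

int main(void)
{
	unsigned int num_compute_rings = 8, num_xcc = 4;

	/* old: one shared set of EOP buffers, aliased by every XCC */
	unsigned int old_size = num_compute_rings * GFX9_MEC_HPD_SIZE;
	/* new: a distinct address range for each XCC's queues */
	unsigned int new_size = num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;

	printf("old %u KiB, new %u KiB\n", old_size / 1024, new_size / 1024);
	return 0;
}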
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Tested-and-Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 6aaa810ea044..b56fa2945464 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -455,7 +455,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); - mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; + mec_hpd_size = + adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE; if (mec_hpd_size) { r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, From 85b45b60722f506322393320bb6cc195378f2e4f Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Mon, 28 Nov 2022 11:26:02 -0500 Subject: [PATCH 392/861] amd/amdgpu: Set MTYPE_UC for access over PCIe For GFX v9_4_3, set MTYPE_UC for memory access over PCIe. v4 - add missing indentation pointed out by Felix and add his reviewed-by tag. v3 - add missing logic for the svm path. v2 - add amdgpu_xgmi_same_hive to separate access over xgmi from pcie Reviewed-by: Felix Kuehling Signed-off-by: Amber Lin Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 29 ++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 5d6e02559d8e..6daba0582bf3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1195,16 +1195,29 @@ svm_range_get_pte_flags(struct kfd_node *node, //e.g. NPS4. Current approch is only applicable without memory //partitions. snoop = true; - if (uncached) + if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; - /* local HBM region close to partition*/ - else if (bo_node == node) - mapping_flags |= AMDGPU_VM_MTYPE_RW; - /* local HBM region far from partition or remote XGMI GPU or - * system memory - */ - else + } else if (domain == SVM_RANGE_VRAM_DOMAIN) { + /* local HBM region close to partition with a workaround + * for Endpoint systems. + */ + if (bo_node == node) + mapping_flags |= + (node->adev->flags & AMD_IS_APU) ? + AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_NC; + /* local HBM region far from partition or remote XGMI GPU */ + else if (svm_nodes_in_same_hive(bo_node, node)) + mapping_flags |= AMDGPU_VM_MTYPE_NC; + /* PCIe P2P */ + else + mapping_flags |= AMDGPU_VM_MTYPE_UC; + /* system memory accessed by the APU */ + } else if (node->adev->flags & AMD_IS_APU) { mapping_flags |= AMDGPU_VM_MTYPE_NC; + /* system memory accessed by the dGPU */ + } else { + mapping_flags |= AMDGPU_VM_MTYPE_UC; + } break; default: mapping_flags |= coherent ? From 7a7aaab021a6772b29c81c22db9f4d2d8cd66ecd Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Tue, 20 Dec 2022 15:37:57 -0500 Subject: [PATCH 393/861] drm/amdgpu: Make UTCL2 snoop CPU caches On AMD APP APUs, to make UTCL2 snoop CPU caches, its not sufficient to rely on xgmi connected flag so add the logic to use is_app_apu to program the PDE_REQUEST_PHYSICAL bit correctly for gfxhub and mmhub both. 
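The change in both hubs reduces to one predicate; a hedged restatement (field names as in the diff below, helper name invented):

/* Route PDE/PTE walker requests as physical, so UTCL2 snoops CPU
 * caches, whenever the CPU shares the coherent fabric: either an XGMI
 * link to the CPU or an APP APU carve-out. Illustrative helper only. */
static bool utcl2_pde_request_physical(bool connected_to_cpu, bool is_app_apu)
{
	return connected_to_cpu || is_app_apu;
}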
Reviewed-by: Felix Kuehling Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 3 ++- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 8ba59ffe0e9f..8901e73fd700 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -252,7 +252,8 @@ static void gfxhub_v1_2_xcc_init_cache_regs(struct amdgpu_device *adev, WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL3, tmp); tmp = regVM_L2_CNTL4_DEFAULT; - if (adev->gmc.xgmi.connected_to_cpu) { + /* For AMD APP APUs setup WC memory */ + if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) { tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index a530e2a3cc28..a8faf66b6878 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -257,7 +257,8 @@ static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp); tmp = regVM_L2_CNTL4_DEFAULT; - if (adev->gmc.xgmi.connected_to_cpu) { + /* For AMD APP APUs setup WC memory */ + if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) { tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, From 34fd9d686772f6725242e900913ca2be987c12dd Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 20 Dec 2022 14:21:57 +0530 Subject: [PATCH 394/861] drm/amdgpu: Add FGCG logic for GFX v9.4.3 Add logic for fine grain clock gating logic for GFX v9.4.3. The feature will be controlled using CG flags. Also, make a change so that RLC safe mode entry/exit is done only once during CG update sequence. 
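The resulting update sequence, in hedged pseudo-C (helper names shortened from the patch; the point is the single RLC safe-mode bracket around the whole sequence):

static int update_gfx_clock_gating(bool enable, int xcc_id)
{
	rlc_enter_safe_mode(xcc_id);		/* once, for all stages */

	if (enable) {
		update_sram_fgcg(true, xcc_id);		/* FGCG first */
		update_repeater_fgcg(true, xcc_id);
		update_mgcg(true, xcc_id);		/* MGCG before CGCG */
		update_cgcg(true, xcc_id);
	} else {
		update_cgcg(false, xcc_id);		/* disable in reverse */
		update_mgcg(false, xcc_id);
		update_sram_fgcg(false, xcc_id);
		update_repeater_fgcg(false, xcc_id);
	}

	rlc_exit_safe_mode(xcc_id);		/* once, on the way out */
	return 0;
}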
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 71 ++++++++++++++++++++++--- 1 file changed, 65 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index b56fa2945464..55d99c4ea48c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2172,14 +2172,64 @@ static int gfx_v9_4_3_late_init(void *handle) return 0; } +static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev, + bool enable, int xcc_id) +{ + uint32_t def, data; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) + return; + + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), + regRLC_CGTT_MGCG_OVERRIDE); + + if (enable) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; + else + data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; + + if (def != data) + WREG32_SOC15(GC, GET_INST(GC, xcc_id), + regRLC_CGTT_MGCG_OVERRIDE, data); + + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL); + + if (enable) + data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK; + else + data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK; + + if (def != data) + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL, data); +} + +static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev, + bool enable, int xcc_id) +{ + uint32_t def, data; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) + return; + + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), + regRLC_CGTT_MGCG_OVERRIDE); + + if (enable) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; + else + data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; + + if (def != data) + WREG32_SOC15(GC, GET_INST(GC, xcc_id), + regRLC_CGTT_MGCG_OVERRIDE, data); +} + static void gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable, int xcc_id) { uint32_t data, def; - amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); - /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { /* 1 - RLC_CGTT_MGCG_OVERRIDE */ @@ -2239,7 +2289,6 @@ gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, } } - amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); } static void @@ -2248,8 +2297,6 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, { uint32_t def, data; - amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); /* unset CGCG override */ @@ -2292,13 +2339,18 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); } - amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); } static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev, bool enable, int xcc_id) { + amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); + if (enable) { + /* FGCG */ + gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); + gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); + /* CGCG/CGLS should be enabled after MGCG/MGLS * === MGCG + MGLS === */ @@ -2316,7 +2368,14 @@ static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev, /* === MGCG + MGLS === */ gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, xcc_id); + + /* FGCG */ + gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); + 
gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); } + + amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); + return 0; } From c9a502e981a961053f3f873b14677d95e804251e Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 29 Nov 2022 12:45:26 -0500 Subject: [PATCH 395/861] drm/amdgpu: Allocate GART table in RAM for AMD APU Some AMD APUs may not have a dedicated VRAM. On such platforms the GART table should be allocated on the system memory. When real vram size is zero, place the GART table in system memory and create an SG BO to make it GPU accessible. v2: fix includes Reviewed-by: Felix Kuehling (rajneesh: removed set_memory_wc workaround) Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Harish Kasiviswanathan Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 137 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 2 + drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 23 +++- 3 files changed, 156 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 6b12f4a75fc3..a070adf30c88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -35,6 +35,7 @@ #endif #include "amdgpu.h" #include +#include /* * GART @@ -102,6 +103,142 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) adev->dummy_page_addr = 0; } +/** + * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table + * + * @adev: amdgpu_device pointer + * + * Allocate system memory for GART page table for ASICs that don't have + * dedicated VRAM. + * Returns 0 for success, error for failure. + */ +int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev) +{ + unsigned int order = get_order(adev->gart.table_size); + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; + struct amdgpu_bo *bo = NULL; + struct sg_table *sg = NULL; + struct amdgpu_bo_param bp; + dma_addr_t dma_addr; + struct page *p; + int ret; + + if (adev->gart.bo != NULL) + return 0; + + p = alloc_pages(gfp_flags, order); + if (!p) + return -ENOMEM; + + /* If the hardware does not support UTCL2 snooping of the CPU caches + * then set_memory_wc() could be used as a workaround to mark the pages + * as write combine memory. 
+ */ + dma_addr = dma_map_page(&adev->pdev->dev, p, 0, adev->gart.table_size, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&adev->pdev->dev, dma_addr)) { + dev_err(&adev->pdev->dev, "Failed to DMA MAP the GART BO page\n"); + __free_pages(p, order); + p = NULL; + return -EFAULT; + } + + dev_info(adev->dev, "%s dma_addr:%llx\n", __func__, dma_addr); + /* Create SG table */ + sg = kmalloc(sizeof(*sg), GFP_KERNEL); + if (!sg) { + ret = -ENOMEM; + goto error; + } + ret = sg_alloc_table(sg, 1, GFP_KERNEL); + if (ret) + goto error; + + sg_dma_address(sg->sgl) = dma_addr; + sg->sgl->length = adev->gart.table_size; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg->sgl->dma_length = adev->gart.table_size; +#endif + /* Create SG BO */ + memset(&bp, 0, sizeof(bp)); + bp.size = adev->gart.table_size; + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_CPU; + bp.type = ttm_bo_type_sg; + bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + bp.flags = 0; + ret = amdgpu_bo_create(adev, &bp, &bo); + if (ret) + goto error; + + bo->tbo.sg = sg; + bo->tbo.ttm->sg = sg; + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; + + ret = amdgpu_bo_reserve(bo, true); + if (ret) { + dev_err(adev->dev, "(%d) failed to reserve bo for GART system bo\n", ret); + goto error; + } + + ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); + WARN(ret, "Pinning the GART table failed"); + if (ret) + goto error_resv; + + adev->gart.bo = bo; + adev->gart.ptr = page_to_virt(p); + /* Make GART table accessible in VMID0 */ + ret = amdgpu_ttm_alloc_gart(&adev->gart.bo->tbo); + if (ret) + amdgpu_gart_table_ram_free(adev); + amdgpu_bo_unreserve(bo); + + return 0; + +error_resv: + amdgpu_bo_unreserve(bo); +error: + amdgpu_bo_unref(&bo); + if (sg) { + sg_free_table(sg); + kfree(sg); + } + __free_pages(p, order); + return ret; +} + +/** + * amdgpu_gart_table_ram_free - free gart page table system ram + * + * @adev: amdgpu_device pointer + * + * Free the system memory used for the GART page tableon ASICs that don't + * have dedicated VRAM. 
+ */ +void amdgpu_gart_table_ram_free(struct amdgpu_device *adev) +{ + unsigned int order = get_order(adev->gart.table_size); + struct sg_table *sg = adev->gart.bo->tbo.sg; + struct page *p; + int ret; + + ret = amdgpu_bo_reserve(adev->gart.bo, false); + if (!ret) { + amdgpu_bo_unpin(adev->gart.bo); + amdgpu_bo_unreserve(adev->gart.bo); + } + amdgpu_bo_unref(&adev->gart.bo); + sg_free_table(sg); + kfree(sg); + p = virt_to_page(adev->gart.ptr); + __free_pages(p, order); + + adev->gart.ptr = NULL; +} + /** * amdgpu_gart_table_vram_alloc - allocate vram for gart page table * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index 8fea3e04e411..8283d682f543 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -51,6 +51,8 @@ struct amdgpu_gart { uint64_t gart_pte_flags; }; +int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); +void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 444441c6b7e3..aca8489635b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1688,12 +1688,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) | AMDGPU_PTE_EXECUTABLE; - r = amdgpu_gart_table_vram_alloc(adev); - if (r) - return r; + if (!adev->gmc.real_vram_size) { + dev_info(adev->dev, "Put GART in system memory for APU\n"); + r = amdgpu_gart_table_ram_alloc(adev); + if (r) + dev_err(adev->dev, "Failed to allocate GART in system memory\n"); + } else { + r = amdgpu_gart_table_vram_alloc(adev); + if (r) + return r; - if (adev->gmc.xgmi.connected_to_cpu) { - r = amdgpu_gmc_pdb0_alloc(adev); + if (adev->gmc.xgmi.connected_to_cpu) + r = amdgpu_gmc_pdb0_alloc(adev); } return r; @@ -1902,7 +1908,12 @@ static int gmc_v9_0_sw_fini(void *handle) amdgpu_gmc_ras_fini(adev); amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - amdgpu_gart_table_vram_free(adev); + if (!adev->gmc.real_vram_size) { + dev_info(adev->dev, "Put GART in system memory for APU free\n"); + amdgpu_gart_table_ram_free(adev); + } else { + amdgpu_gart_table_vram_free(adev); + } amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); amdgpu_bo_fini(adev); From 44cbc4534bbe7cc1f7dd25976a044c7a84628978 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 1 Dec 2022 17:27:47 +0530 Subject: [PATCH 396/861] drm/amdgpu: Make VRAM discovery read optional When overridden with module param, directly read discovery info from discovery binary instead of reading from VRAM. 
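In outline, the reworked flow looks like this (hedged pseudo-C; amdgpu_discovery is the module parameter, and 2 means the file is preferred):

/* The binary now comes from exactly one source, with no VRAM-to-file
 * fallback, and the signature check runs once on whichever copy won. */
if (amdgpu_discovery == 2) {
	r = read_binary_from_file(adev, bin);	/* forced via module param */
	if (r)
		goto out;			/* hard failure, no fallback */
} else {
	read_binary_from_vram(adev, bin);	/* default path, returns void */
}

if (!verify_binary_signature(bin)) {		/* common validation */
	r = -EINVAL;
	goto out;
}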
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 5b6088fc2b2b..700750433d0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -301,28 +301,27 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (!adev->mman.discovery_bin) return -ENOMEM; - amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); - - if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) { - /* ignore the discovery binary from vram if discovery=2 in kernel module parameter */ - if (amdgpu_discovery == 2) - dev_info(adev->dev, "force read ip discovery binary from file"); - else - dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n"); - - /* retry read ip discovery binary from file */ + /* Read from file if it is the preferred option */ + if (amdgpu_discovery == 2) { + dev_info(adev->dev, "use ip discovery information from file"); r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin); + if (r) { dev_err(adev->dev, "failed to read ip discovery binary from file\n"); r = -EINVAL; goto out; } - /* check the ip discovery binary signature */ - if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { - dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n"); - r = -EINVAL; - goto out; - } + + } else { + amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); + } + + /* check the ip discovery binary signature */ + if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + dev_err(adev->dev, + "get invalid ip discovery binary signature\n"); + r = -EINVAL; + goto out; } bhdr = (struct binary_header *)adev->mman.discovery_bin; From 73fa255328263e525d7d46d511b088e80e3cd579 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 28 Nov 2022 12:02:14 +0530 Subject: [PATCH 397/861] drm/amdgpu: Use discovery to get XCC/SDMA mask Get information about active XCC and SDMAs from discovery table. 
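A compact stand-alone model of the two passes involved (set a bit for each instance in the discovery table, then clear the bits named in the harvest table); the instance numbers are invented:

#include <stdio.h>

int main(void)
{
	unsigned int xcc_mask = 0;
	unsigned int discovered[] = { 0, 1, 2, 3 };	/* GC entries in IP table */
	unsigned int harvested = 2;			/* from harvest table */

	for (unsigned int i = 0; i < 4; i++)
		xcc_mask |= 1U << discovered[i];	/* discovery pass */

	xcc_mask &= ~(1U << harvested);			/* harvest pass */

	printf("active xcc_mask = 0x%x\n", xcc_mask);	/* 0xb: XCCs 0, 1, 3 */
	return 0;
}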
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 25 ++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 700750433d0f..a1f06945b8ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -574,6 +574,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, 1 << (le16_to_cpu(harvest_info->list[i].number_instance)); (*umc_harvest_count)++; break; + case GC_HWID: + adev->gfx.xcc_mask &= + ~(1U << harvest_info->list[i].number_instance); + break; + case SDMA0_HWID: + adev->sdma.sdma_mask &= + ~(1U << harvest_info->list[i].number_instance); + break; default: break; } @@ -1106,6 +1114,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) return r; } + adev->gfx.xcc_mask = 0; + adev->sdma.sdma_mask = 0; bhdr = (struct binary_header *)adev->mman.discovery_bin; ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); @@ -1164,12 +1174,16 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) le16_to_cpu(ip->hw_id) == SDMA1_HWID || le16_to_cpu(ip->hw_id) == SDMA2_HWID || le16_to_cpu(ip->hw_id) == SDMA3_HWID) { - if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES) + if (adev->sdma.num_instances < + AMDGPU_MAX_SDMA_INSTANCES) { adev->sdma.num_instances++; - else + adev->sdma.sdma_mask |= + (1U << ip->instance_number); + } else { dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n", adev->sdma.num_instances + 1, AMDGPU_MAX_SDMA_INSTANCES); + } } if (le16_to_cpu(ip->hw_id) == UMC_HWID) { @@ -1177,6 +1191,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) adev->umc.node_inst_num++; } + if (le16_to_cpu(ip->hw_id) == GC_HWID) + adev->gfx.xcc_mask |= + (1U << ip->instance_number); + for (k = 0; k < num_base_address; k++) { /* * convert the endianness of base addresses in place, @@ -1243,7 +1261,8 @@ static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) * so read harvest bit per IP data structure to set * harvest configuration. */ - if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) { + if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) && + adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) { if ((adev->pdev->device == 0x731E && (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) || From 45ed97ad36b744dfc2754c47cfd4423aab6322a2 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 12 Dec 2022 12:29:04 -0500 Subject: [PATCH 398/861] drm/amdgpu: increase MAX setting to hold more jpeg instances vcn_v4_0_3 increased the number of JPEG instances, so the MAX resource settings need to be increased accordingly.
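As a sanity check on the widened window (arithmetic only; the 9-doorbell stride per instance appears to follow from the vcn_v4_0_3 ring setup earlier in this series):

/* old VCN window: 0x1C2 - 0x1B0 = 0x12 = 18 slots (2 instances x 9)
 * new VCN window: 0x1D4 - 0x1B0 = 0x24 = 36 slots (4 instances x 9)
 * so VCN_END and MAX_ASSIGNMENT below both move from 0x1C2 to 0x1D4. */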
Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index a29a018ec84e..3c3ae2b4dbc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -325,14 +325,14 @@ typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F, /* IH: 0x1A0 ~ 0x1AF */ AMDGPU_DOORBELL_LAYOUT1_IH = 0x1A0, - /* VCN: 0x1B0 ~ 0x1C2 */ + /* VCN: 0x1B0 ~ 0x1D4 */ AMDGPU_DOORBELL_LAYOUT1_VCN_START = 0x1B0, - AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1C2, + AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1D4, AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START, AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END, - AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1C2, + AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1D4, AMDGPU_DOORBELL_LAYOUT1_INVALID = 0xFFFF } AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 5c200a508fa3..bb700a2b97c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -26,7 +26,7 @@ #include "amdgpu_ras.h" -#define AMDGPU_MAX_JPEG_INSTANCES 2 +#define AMDGPU_MAX_JPEG_INSTANCES 4 #define AMDGPU_MAX_JPEG_RINGS 8 #define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 8f4b416a92e6..bed118d7add0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -37,7 +37,7 @@ struct amdgpu_job; struct amdgpu_vm; /* max number of rings */ -#define AMDGPU_MAX_RINGS 96 +#define AMDGPU_MAX_RINGS 102 #define AMDGPU_MAX_HWIP_RINGS 32 #define AMDGPU_MAX_GFX_RINGS 2 #define AMDGPU_MAX_SW_GFX_RINGS 2 From 2d7f1d51c1e9812c6a085b6e5bfd99e31b1442cb Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 12 Dec 2022 12:31:23 -0500 Subject: [PATCH 399/861] drm/amdgpu: add fwlog support on vcn_v4_0_3 Add fwlog support on vcn_v4_0_3. Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 8db50a50b868..40de2852c5ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -118,6 +118,9 @@ static int vcn_v4_0_3_sw_init(void *handle) fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; fw_shared->sq.is_enabled = cpu_to_le32(true); + + if (amdgpu_vcnfw_log) + amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); } if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) From da044aaeb31ac11e733a667763487508433f3ede Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 17 Dec 2022 10:51:04 -0500 Subject: [PATCH 400/861] drm/amdgpu: add unified queue support on vcn_v4_0_3 Add unified queue support on vcn_v4_0_3.
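At init time the switch amounts to the following (hedged sketch; field and flag names per the diff below):

/* One queue per instance: enc ring 0 becomes the unified ring that
 * carries both decode and encode, and firmware is told through the
 * shared-memory flag instead of a separate software decode ring. */
adev->vcn.num_enc_rings = 1;			/* re-use enc ring */
ring = &adev->vcn.inst[i].ring_enc[0];		/* not ring_dec */
fw_shared->present_flag_0 =
	cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);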
Signed-off-by: James Zhu Acked-by Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 9 +- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 173 +++++++++++++----------- 2 files changed, 102 insertions(+), 80 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 72eb12aa2e8d..2e28b19e741e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1004,11 +1004,14 @@ error: int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout) { + struct amdgpu_device *adev = ring->adev; long r; - r = amdgpu_vcn_enc_ring_test_ib(ring, timeout); - if (r) - goto error; + if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(4, 0, 3)) { + r = amdgpu_vcn_enc_ring_test_ib(ring, timeout); + if (r) + goto error; + } r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 40de2852c5ca..18320eebdb4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -31,7 +31,6 @@ #include "soc15d.h" #include "soc15_hw_ip.h" #include "vcn_v2_0.h" -#include "vcn_sw_ring.h" #include "vcn/vcn_4_0_3_offset.h" #include "vcn/vcn_4_0_3_sh_mask.h" @@ -45,12 +44,13 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300 -static void vcn_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev); +static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev); static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v4_0_3_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, struct dpg_pause_state *new_state); +static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v4_0_3_early_init - set function pointers @@ -63,7 +63,10 @@ static int vcn_v4_0_3_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - vcn_v4_0_3_set_dec_ring_funcs(adev); + /* re-use enc ring as unified ring */ + adev->vcn.num_enc_rings = 1; + + vcn_v4_0_3_set_unified_ring_funcs(adev); vcn_v4_0_3_set_irq_funcs(adev); return 0; @@ -94,7 +97,7 @@ static int vcn_v4_0_3_sw_init(void *handle) /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID__UVD_TRAP, &adev->vcn.inst->irq); + VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq); if (r) return r; @@ -104,11 +107,11 @@ static int vcn_v4_0_3_sw_init(void *handle) if (adev->vcn.harvest_config & (1 << i)) continue; - ring = &adev->vcn.inst[i].ring_dec; + ring = &adev->vcn.inst[i].ring_enc[0]; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i; ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); - sprintf(ring->name, "vcn_dec_%d", i); + sprintf(ring->name, "vcn_unified_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score); @@ -116,7 +119,7 @@ static int vcn_v4_0_3_sw_init(void *handle) return r; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; - fw_shared->present_flag_0 = 0; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); fw_shared->sq.is_enabled = cpu_to_le32(true); if (amdgpu_vcnfw_log) @@ -179,17 +182,17 @@ static int vcn_v4_0_3_hw_init(void *handle) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & 
(1 << i)) continue; - ring = &adev->vcn.inst[i].ring_dec; + ring = &adev->vcn.inst[i].ring_enc[0]; - if (ring->use_doorbell) + if (ring->use_doorbell) { adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i, adev->vcn.inst[i].aid_id); - if (ring->use_doorbell) - WREG32_SOC15(VCN, ring->me, regVCN_RB4_DB_CTRL, - ring->doorbell_index << VCN_RB4_DB_CTRL__OFFSET__SHIFT | - VCN_RB4_DB_CTRL__EN_MASK); + WREG32_SOC15(VCN, ring->me, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + } r = amdgpu_ring_test_helper(ring); if (r) @@ -731,31 +734,31 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); - ring = &adev->vcn.inst[inst_idx].ring_dec; + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; /* program the RB_BASE for ring buffer */ - WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO4, + WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI4, + WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t)); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); /* resetting ring, fw should not check RB ring */ tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); - tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK); + tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK); WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR4, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4, 0); - ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0); + ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); - tmp |= VCN_RB_ENABLE__RB4_EN_MASK; + tmp |= VCN_RB_ENABLE__RB_EN_MASK; WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); - - WREG32_SOC15(VCN, inst_idx, regUVD_SCRATCH2, 0); + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); /*resetting done, fw can check RB ring */ fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); @@ -902,31 +905,31 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); - ring = &adev->vcn.inst[i].ring_dec; + ring = &adev->vcn.inst[i].ring_enc[0]; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; /* program the RB_BASE for ring buffer */ - WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO4, + WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI4, + WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, i, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t)); + WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); /* resetting ring, fw should not check RB ring */ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); - tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK); + tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK); WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(VCN, i, 
regUVD_RB_RPTR4, 0); - WREG32_SOC15(VCN, i, regUVD_RB_WPTR4, 0); + WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0); tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); - tmp |= VCN_RB_ENABLE__RB4_EN_MASK; + tmp |= VCN_RB_ENABLE__RB_EN_MASK; WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); - ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR4); + ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR); fw_shared->sq.queue_mode &= cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF)); @@ -951,8 +954,8 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* wait for read ptr to be equal to write ptr */ - tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4); - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR4, tmp, 0xFFFFFFFF); + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); @@ -972,15 +975,20 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) */ static int vcn_v4_0_3_stop(struct amdgpu_device *adev) { + volatile struct amdgpu_vcn4_fw_shared *fw_shared; uint32_t tmp; int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { - r = vcn_v4_0_3_stop_dpg_mode(adev, i); - goto Done; + vcn_v4_0_3_stop_dpg_mode(adev, i); + continue; } /* wait for vcn idle */ @@ -1000,7 +1008,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev) tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2); tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp); - tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); if (r) @@ -1059,101 +1067,112 @@ static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, } /** - * vcn_v4_0_3_dec_ring_get_rptr - get read pointer + * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer * * @ring: amdgpu_ring pointer * - * Returns the current hardware read pointer + * Returns the current hardware unified read pointer */ -static uint64_t vcn_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring) +static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR4); + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR); } /** - * vcn_v4_0_3_dec_ring_get_wptr - get write pointer + * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer * * @ring: amdgpu_ring pointer * - * Returns the current hardware write pointer + * Returns the current hardware unified write pointer */ -static uint64_t vcn_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring) +static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + if (ring->use_doorbell) - return adev->wb.wb[ring->wptr_offs]; + return *ring->wptr_cpu_addr; else - return 
RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR4); + return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR); } /** - * vcn_v4_0_3_dec_ring_set_wptr - set write pointer + * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer * * @ring: amdgpu_ring pointer * - * Commits the write pointer to the hardware + * Commits the enc write pointer to the hardware */ -static void vcn_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring) +static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + if (ring->use_doorbell) { - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR4, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr)); } } -static const struct amdgpu_ring_funcs vcn_v4_0_3_dec_sw_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_DEC, +static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, - .nop = VCN_DEC_SW_CMD_NO_OP, - .get_rptr = vcn_v4_0_3_dec_ring_get_rptr, - .get_wptr = vcn_v4_0_3_dec_ring_get_wptr, - .set_wptr = vcn_v4_0_3_dec_ring_set_wptr, + .nop = VCN_ENC_CMD_NO_OP, + .get_rptr = vcn_v4_0_3_unified_ring_get_rptr, + .get_wptr = vcn_v4_0_3_unified_ring_get_wptr, + .set_wptr = vcn_v4_0_3_unified_ring_set_wptr, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + - VCN_SW_RING_EMIT_FRAME_SIZE, - .emit_ib_size = 5, /* vcn_dec_sw_ring_emit_ib */ - .emit_ib = vcn_dec_sw_ring_emit_ib, - .emit_fence = vcn_dec_sw_ring_emit_fence, - .emit_vm_flush = vcn_dec_sw_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_dec_sw_ring_test_ring, - .test_ib = amdgpu_vcn_dec_sw_ring_test_ib, + 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ + 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ + 1, /* vcn_v2_0_enc_ring_insert_end */ + .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ + .emit_ib = vcn_v2_0_enc_ring_emit_ib, + .emit_fence = vcn_v2_0_enc_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_enc_ring_test_ring, + .test_ib = amdgpu_vcn_unified_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, - .insert_end = vcn_dec_sw_ring_insert_end, + .insert_end = vcn_v2_0_enc_ring_insert_end, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_vcn_ring_begin_use, .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_dec_sw_ring_emit_wreg, - .emit_reg_wait = vcn_dec_sw_ring_emit_reg_wait, + .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; /** - * vcn_v4_0_3_set_dec_ring_funcs - set dec ring functions + * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions * * @adev: amdgpu_device pointer * - * Set decode ring functions + * Set unified ring functions */ -static void vcn_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) +static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev) { int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - adev->vcn.inst[i].ring_dec.funcs = &vcn_v4_0_3_dec_sw_ring_vm_funcs; - adev->vcn.inst[i].ring_dec.me = i; + adev->vcn.inst[i].ring_enc[0].funcs = 
&vcn_v4_0_3_unified_ring_vm_funcs; + adev->vcn.inst[i].ring_enc[0].me = i; adev->vcn.inst[i].aid_id = i / adev->vcn.num_inst_per_aid; } - DRM_DEV_INFO(adev->dev, "VCN decode(Software Ring) is enabled in VM mode\n"); + DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n"); } /** @@ -1276,7 +1295,7 @@ static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev, } /** - * vcn_v4_0_process_interrupt - process VCN block interrupt + * vcn_v4_0_3_process_interrupt - process VCN block interrupt * * @adev: amdgpu_device pointer * @source: interrupt sources @@ -1295,8 +1314,8 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev, DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n"); switch (entry->src_id) { - case VCN_4_0__SRCID__UVD_TRAP: - amdgpu_fence_process(&adev->vcn.inst[i].ring_dec); + case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: + amdgpu_fence_process(&adev->vcn.inst[i].ring_enc[0]); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", From e40b4b9a474887653263c138b0172b0e2fde57cb Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 12 Dec 2022 13:14:05 -0500 Subject: [PATCH 401/861] drm/amdgpu: enable indirect_sram mode on vcn_v4_0_3 Enable indirect_sram mode on vcn_v4_0_3. Signed-off-by: James Zhu Acked-by Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 2e28b19e741e..daa69ae766d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -113,18 +113,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; i++) atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); - switch (adev->ip_versions[UVD_HWIP][0]) { - case IP_VERSION(4, 0, 3): - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = false; - break; - default: - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - } + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; /* * Some Steam Deck's BIOS versions are incompatible with the From ed1f42f03c46767df7f55d6a75c39051a55cc656 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Sat, 17 Dec 2022 19:44:15 -0500 Subject: [PATCH 402/861] drm/amdgpu: enable vcn/jpeg on vcn_v4_0_3 Enable vcn/jpeg on vcn_v4_0_3. 
Signed-off-by: James Zhu Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index a1f06945b8ee..d8f55a99646e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -76,6 +76,8 @@ #include "jpeg_v3_0.h" #include "vcn_v4_0.h" #include "jpeg_v4_0.h" +#include "vcn_v4_0_3.h" +#include "jpeg_v4_0_3.h" #include "amdgpu_vkms.h" #include "mes_v10_1.h" #include "mes_v11_0.h" @@ -1947,7 +1949,11 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(4, 0, 4): amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block); amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); - return 0; + break; + case IP_VERSION(4, 0, 3): + amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block); + break; default: dev_err(adev->dev, "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", From 30b52995c294b3c9a002ff2f90f773cbf8850714 Mon Sep 17 00:00:00 2001 From: Asad kamal Date: Tue, 3 Jan 2023 13:14:58 +0800 Subject: [PATCH 403/861] drm/amdgpu: Remove SMU powergate message call for SDMA SDMA v4.4.2 doesn't need explicit power gating control through PMFW. Signed-off-by: Asad kamal Reviewed-by: Hawking Zhang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 184eb7902722..7c91cbd91ac8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1398,10 +1398,6 @@ static int sdma_v4_4_2_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; uint32_t inst_mask; - /* TODO: Check if this is needed */ - if (adev->flags & AMD_IS_APU) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); - inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); if (!amdgpu_sriov_vf(adev)) sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask); From d839a158b2480814bc438f9f46f440a7b9f63cb6 Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Thu, 5 Jan 2023 10:58:07 -0500 Subject: [PATCH 404/861] drm/amdgpu: Correct dGPU MTYPE settings for gfx943 Revert temporary dGPU VRAM MTYPE setting and align with expected coherency protocol.
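Restated as a tiny stand-alone helper (decision logic only, mirroring the gmc_v9_0 hunk below; the enum values are placeholders for the hardware MTYPEs):

enum mtype { MTYPE_RW, MTYPE_NC, MTYPE_UC };

/* dGPU mapping policy for gfx943 after this patch:
 * local VRAM  -> RW (fully cacheable),
 * remote VRAM -> NC (non-coherent),
 * everything reached over PCIe, incl. system memory -> UC (uncached). */
static enum mtype gfx943_dgpu_mtype(int is_vram, int bo_is_local)
{
	if (is_vram && bo_is_local)
		return MTYPE_RW;
	if (is_vram)
		return MTYPE_NC;
	return MTYPE_UC;
}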
Signed-off-by: Graham Sider Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 +++++---------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++------ 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index aca8489635b8..b6c500be6f70 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1238,17 +1238,12 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, mtype = MTYPE_RW; } else { /* dGPU */ - /* - if ((mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) && - bo_adev == adev) - mapping_flags |= AMDGPU_VM_MTYPE_RW; + if (is_vram && bo_adev == adev) + mtype = MTYPE_RW; + else if (is_vram) + mtype = MTYPE_NC; else - */ - /* Temporarily comment out above lines and use MTYPE_NC - * on both VRAM and system memory access until - * MTYPE_RW can properly work on VRAM access - */ - mtype = MTYPE_NC; + mtype = MTYPE_UC; } break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 6daba0582bf3..2b79849ddd30 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1198,13 +1198,9 @@ svm_range_get_pte_flags(struct kfd_node *node, if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; } else if (domain == SVM_RANGE_VRAM_DOMAIN) { - /* local HBM region close to partition with a workaround - * for Endpoint systems. - */ + /* local HBM region close to partition */ if (bo_node == node) - mapping_flags |= - (node->adev->flags & AMD_IS_APU) ? - AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_NC; + mapping_flags |= AMDGPU_VM_MTYPE_RW; /* local HBM region far from partition or remote XGMI GPU */ else if (svm_nodes_in_same_hive(bo_node, node)) mapping_flags |= AMDGPU_VM_MTYPE_NC; From 52c293ab0653b3e57d2202a5002f59593ed12d1b Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 10 Jan 2023 09:52:53 +0530 Subject: [PATCH 405/861] drm/amdgpu: Populate VCN/JPEG harvest information Certain instances of VCN/JPEG IPs may not be usable. Fetch the information from harvest table. 
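The bookkeeping change in miniature (stand-alone C; the harvested instance numbers are examples): per-instance bitmasks replace the fixed VCN0/VCN1 flags, and JPEG inherits VCN's harvesting, as sketched here.

#include <stdio.h>

int main(void)
{
	unsigned int vcn_harvest = 0, jpeg_harvest = 0;
	unsigned int harvested[] = { 1, 3 };	/* instances from harvest table */

	for (unsigned int i = 0; i < 2; i++) {
		vcn_harvest |= 1U << harvested[i];
		jpeg_harvest |= 1U << harvested[i];	/* JPEG follows VCN */
	}

	/* prints 0xa 0xa: instances 1 and 3 are unusable on both IPs */
	printf("vcn 0x%x jpeg 0x%x\n", vcn_harvest, jpeg_harvest);
	return 0;
}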
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index d8f55a99646e..6701f17a4db6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -563,10 +563,10 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, switch (le16_to_cpu(harvest_info->list[i].hw_id)) { case VCN_HWID: (*vcn_harvest_count)++; - if (harvest_info->list[i].number_instance == 0) - adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; - else - adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + adev->vcn.harvest_config |= + (1 << harvest_info->list[i].number_instance); + adev->jpeg.harvest_config |= + (1 << harvest_info->list[i].number_instance); break; case DMU_HWID: adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; From f471de2586e8ef388eea2cafa911695d94ec1d88 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 19 Dec 2022 20:11:11 -0500 Subject: [PATCH 406/861] drm/amdgpu: vcn_v4_0_3 load vcn fw once for all AIDs Signed-off-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 3 +++ drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index daa69ae766d3..57dabfe1a1be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1046,6 +1046,9 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev) adev->firmware.ucode[idx].fw = adev->vcn.fw; adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(4, 0, 3)) + break; } dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 18320eebdb4c..746df23b2eaa 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -730,7 +730,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, + psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); From 1bd99ca2695a76f15d7f7862d6ef878588e854dc Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 10 Jan 2023 09:01:33 -0500 Subject: [PATCH 407/861] drm/amdgpu: increase AMDGPU_MAX_HWIP_RINGS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [WA] Increase AMDGPU_MAX_HWIP_RINGS to 64 to support more compute ring resources. A later redesign that accounts for queue/priority/scheduler factors is needed to reduce AMDGPU_MAX_HWIP_RINGS.
Signed-off-by: James Zhu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index bed118d7add0..add7cc2831b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -38,7 +38,7 @@ struct amdgpu_vm; /* max number of rings */ #define AMDGPU_MAX_RINGS 102 -#define AMDGPU_MAX_HWIP_RINGS 32 +#define AMDGPU_MAX_HWIP_RINGS 64 #define AMDGPU_MAX_GFX_RINGS 2 #define AMDGPU_MAX_SW_GFX_RINGS 2 #define AMDGPU_MAX_COMPUTE_RINGS 8 From 13a94f3f130ee4db6b4d2a0843104807a7299aa4 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 10 Jan 2023 09:05:35 -0500 Subject: [PATCH 408/861] drm/amdgpu: add num_xcps return Add num_xcps return. Signed-off-by: James Zhu Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 0d7bc212def1..6591d39c6518 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -318,6 +318,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, adev->nbio.funcs->set_compute_partition_mode(adev, mode); ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); + + *num_xcps = num_xcc / num_xcc_per_xcp; unlock: if (flags & AMDGPU_XCP_OPS_KFD) amdgpu_amdkfd_unlock_kfd(adev); From b2ef2fdffed2a7fd5bf3f178a6a0427487dba5dd Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Thu, 5 Jan 2023 11:39:34 -0500 Subject: [PATCH 409/861] drm/amdkfd: Report XGMI IOLINKs for GFXIP9.4.3 GFXIP 9.4.3 could be in APU or carveout mode but we cannot use the xgmi.connected_to_cpu flag to identify the iolinks type. Use appropriate APU or Carveout mode based condition to report xgmi connection in kfd topology. Reviewed-by: Felix Kuehling Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 5 ++++- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5 ++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index dc93a67257e1..16475921587b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -2005,7 +2005,10 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, /* Fill in IOLINK subtype. * TODO: Fill-in other fields of iolink subtype */ - if (kdev->adev->gmc.xgmi.connected_to_cpu) { + if (kdev->adev->gmc.xgmi.connected_to_cpu || + (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 3) && + kdev->adev->smuio.funcs->get_pkg_type(kdev->adev) == + AMDGPU_PKG_TYPE_APU)) { bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3); int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT : KFD_CRAT_INTRA_SOCKET_WEIGHT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index d3e70341dfad..5373a79ac6a1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1327,9 +1327,8 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) continue; /* Include the CPU peer in GPU hive if connected over xGMI. 
*/ - if (!peer_dev->gpu && !peer_dev->node_props.hive_id && - dev->node_props.hive_id && - dev->gpu->adev->gmc.xgmi.connected_to_cpu) + if (!peer_dev->gpu && + link->iolink_type == CRAT_IOLINK_TYPE_XGMI) peer_dev->node_props.hive_id = dev->node_props.hive_id; list_for_each_entry(inbound_link, &peer_dev->io_link_props, From 7389c75114c53b061d686f19dff5833adaf96cb8 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 17 Jan 2023 16:54:49 +0530 Subject: [PATCH 410/861] drm/amdgpu: Keep SDMAv4.4.2 active during reset During an ASIC-wide reset, SDMA shouldn't be clockgated and should stay ready to accept freeze requests from the PMFW. For that, don't stop the SDMA engine during reset and keep the clocks active. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 7c91cbd91ac8..729e26a4a2e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -566,6 +566,11 @@ static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable, sdma_v4_4_2_inst_rlc_stop(adev, inst_mask); if (adev->sdma.has_page_queue) sdma_v4_4_2_inst_page_stop(adev, inst_mask); + + /* SDMA FW needs to respond to FREEZE requests during reset. + * Keep it running during reset */ + if (!amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) + return; } for_each_inst(i, inst_mask) { @@ -1435,6 +1440,9 @@ static int sdma_v4_4_2_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_in_reset(adev)) + sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); + return sdma_v4_4_2_hw_fini(adev); } From 233bb3733bd43966696f4a5e95129476e86bf4e3 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 19 Jan 2023 14:47:22 +0530 Subject: [PATCH 411/861] drm/amdgpu: Use unique doorbell range per xcc Program different ranges in each XCC with MEC_DOORBELL_RANGE_LOWER/HIGHER. Keeping the same range causes the CPF in other XCCs to also be busy when an IB packet is submitted to a KCQ. Only the XCC which processes the packet comes back to idle afterwards, and this causes the other CPs not to be idle. This in turn affects clockgating behavior, as the RLC doesn't get an idle interrupt. LOWER/HIGHER covers only the KIQ/KCQs, which are per-XCC queues. Assigning different ranges doesn't seem to have any side effect, as user queue ranges are outside of this range. User queue tests - PM4 through KFD and AQL through rocr - have the same results after this change.
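The resulting layout gives every XCC a disjoint doorbell window. A small sketch of the index computation the hunks below repeat (simplified; the left shift by one follows the driver's two-dword convention for 64-bit doorbells):

/* sketch only: per-XCC doorbell index for KIQ/KCQ queues */
static uint32_t xcc_doorbell_index(uint32_t base, uint32_t xcc_id,
				   uint32_t xcc_doorbell_range)
{
	/* each XCC owns [base + id * range, base + (id + 1) * range) */
	return (base + xcc_id * xcc_doorbell_range) << 1;
}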
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 35 +++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 5 ++- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 31 +++++++++++----- 4 files changed, 47 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index 3c3ae2b4dbc8..f637574644c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -86,6 +86,8 @@ struct amdgpu_doorbell_index { uint32_t max_assignment; /* Per engine SDMA doorbell size in dword */ uint32_t sdma_doorbell_range; + /* Per xcc doorbell size for KIQ/KCQ */ + uint32_t xcc_doorbell_range; }; typedef enum _AMDGPU_DOORBELL_ASSIGNMENT @@ -309,28 +311,31 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT AMDGPU_DOORBELL64_INVALID = 0xFFFF } AMDGPU_DOORBELL64_ASSIGNMENT; -typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 -{ - /* KIQ: 0~7 for maximum 8 XCD */ - AMDGPU_DOORBELL_LAYOUT1_KIQ_START = 0x000, - AMDGPU_DOORBELL_LAYOUT1_HIQ = 0x008, - AMDGPU_DOORBELL_LAYOUT1_DIQ = 0x009, - /* Compute: 0x0A ~ 0x49 */ - AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START = 0x00A, - AMDGPU_DOORBELL_LAYOUT1_MEC_RING_END = 0x049, - AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START = 0x04A, - AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END = 0x0C9, +typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 { + /* XCC0: 0x00 ~20, XCC1: 20 ~ 2F ... */ + + /* KIQ/HIQ/DIQ */ + AMDGPU_DOORBELL_LAYOUT1_KIQ_START = 0x000, + AMDGPU_DOORBELL_LAYOUT1_HIQ = 0x001, + AMDGPU_DOORBELL_LAYOUT1_DIQ = 0x002, + /* Compute: 0x08 ~ 0x20 */ + AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START = 0x008, + AMDGPU_DOORBELL_LAYOUT1_MEC_RING_END = 0x00F, + AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START = 0x010, + AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END = 0x01F, + AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE = 0x020, + /* SDMA: 0x100 ~ 0x19F */ - AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START = 0x100, - AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F, + AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START = 0x100, + AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F, /* IH: 0x1A0 ~ 0x1AF */ AMDGPU_DOORBELL_LAYOUT1_IH = 0x1A0, /* VCN: 0x1B0 ~ 0x1D4 */ AMDGPU_DOORBELL_LAYOUT1_VCN_START = 0x1B0, AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1D4, - AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START, - AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END, + AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START, + AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END, AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1D4, AMDGPU_DOORBELL_LAYOUT1_INVALID = 0xFFFF diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f895a4b8ca0d..70c6099353b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -316,7 +316,10 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->use_doorbell = true; ring->xcc_id = xcc_id; ring->vm_hub = AMDGPU_GFXHUB(xcc_id); - ring->doorbell_index = (adev->doorbell_index.kiq + xcc_id) << 1; + ring->doorbell_index = + (adev->doorbell_index.kiq + + xcc_id * adev->doorbell_index.xcc_doorbell_range) + << 1; r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 6591d39c6518..55a6ebb940ba 
100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -44,6 +44,7 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START; adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END; + adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE; adev->doorbell_index.sdma_doorbell_range = 20; for (i = 0; i < adev->sdma.num_instances; i++) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 55d99c4ea48c..557a2458ef5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -729,8 +729,10 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, unsigned irq_type; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; unsigned int hw_prio; + uint32_t xcc_doorbell_start; - ring = &adev->gfx.compute_ring[ring_id]; + ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings + + ring_id]; /* mec0 is me1 */ ring->xcc_id = xcc_id; @@ -740,9 +742,12 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->ring_obj = NULL; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; - ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr - + (ring_id * GFX9_MEC_HPD_SIZE); + xcc_doorbell_start = adev->doorbell_index.mec_ring0 + + xcc_id * adev->doorbell_index.xcc_doorbell_range; + ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1; + ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + + (ring_id + xcc_id * adev->gfx.num_compute_rings) * + GFX9_MEC_HPD_SIZE; ring->vm_hub = AMDGPU_GFXHUB(xcc_id); sprintf(ring->name, "comp_%d.%d.%d.%d", ring->xcc_id, ring->me, ring->pipe, ring->queue); @@ -801,8 +806,8 @@ static int gfx_v9_4_3_sw_init(void *handle) } /* set up the compute queues - allocate horizontally across pipes */ - ring_id = 0; for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { + ring_id = 0; for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; @@ -1654,10 +1659,18 @@ static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring, /* enable the doorbell if requested */ if (ring->use_doorbell) { - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER, - (adev->doorbell_index.kiq * 2) << 2); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER, - (adev->doorbell_index.userqueue_end * 2) << 2); + WREG32_SOC15( + GC, GET_INST(GC, xcc_id), + regCP_MEC_DOORBELL_RANGE_LOWER, + ((adev->doorbell_index.kiq + + xcc_id * adev->doorbell_index.xcc_doorbell_range) * + 2) << 2); + WREG32_SOC15( + GC, GET_INST(GC, xcc_id), + regCP_MEC_DOORBELL_RANGE_UPPER, + ((adev->doorbell_index.userqueue_end + + xcc_id * adev->doorbell_index.xcc_doorbell_range) * + 2) << 2); } WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, From b7c7011e67b09efc486b1de38f6bfbed75139989 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 19 Jan 2023 15:00:45 +0530 Subject: [PATCH 412/861] drm/amdgpu: Enable CGCG/LS for GC 9.4.3 Enable coarse grain clockgating/light sleep for GC v9.4.3. Remove programming that is not meant for GC 9.4.3. 
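The enablement follows the usual driver-controlled clockgating pattern: gate all programming on the feature bit in adev->cg_flags, then read-modify-write the RLC control register. A condensed sketch of the CGCG/CGLS enable path using names from the hunks below (declarations added; the MGCG override half is omitted):

uint32_t def, data;

if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
	/* enable the cgcg FSM: idle threshold 0x36 plus CGCG_EN */
	def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
	data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
	       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
		data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
			RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
	if (def != data)	/* skip the write when nothing changes */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
}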
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 14 +++++--------- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 557a2458ef5e..52041d4cb14a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2250,11 +2250,9 @@ gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); - /* only for Vega10 & Raven1 */ - data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; - if (def != data) WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); @@ -2311,6 +2309,7 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, uint32_t def, data; if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { + def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); /* unset CGCG override */ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; @@ -2325,12 +2324,9 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, /* enable cgcg FSM(0x0000363F) */ def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); - if (adev->asic_type == CHIP_ARCTURUS) - data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | - RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; - else - data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | - RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + data = (0x36 + << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | + RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 06a18b2f6e04..4138d9634266 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1121,7 +1121,8 @@ static int soc15_common_early_init(void *handle) case IP_VERSION(9, 4, 3): adev->asic_funcs = &aqua_vanjaram_asic_funcs; adev->cg_flags = - AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | From 5ca1ceebab140b36928dabc9a5b36b9a3010e844 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Sat, 21 Jan 2023 15:47:11 -0500 Subject: [PATCH 413/861] drm/amd: fix compiler error to support older compilers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ‘for’ loop initial declarations are only allowed in C99 or C11 mode Signed-off-by: Harish Kasiviswanathan Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 55a6ebb940ba..fdc728f678d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -360,7 +360,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device 
*adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { - u32 inst_mask = adev->sdma.sdma_mask; + u32 mask, inst_mask = adev->sdma.sdma_mask; int ret, i, num_inst; /* generally 1 AID supports 4 instances */ @@ -370,7 +370,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) adev->aid_mask = i = 1; inst_mask >>= adev->sdma.num_inst_per_aid; - for (const u32 mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; + for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; inst_mask >>= adev->sdma.num_inst_per_aid, ++i) { if ((inst_mask & mask) == mask) adev->aid_mask |= (1 << i); From c1d3f627ff33bf1ae145209030a114c4985beddf Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 20 Jan 2023 12:42:00 +0530 Subject: [PATCH 414/861] drm/amdgpu: Fix mqd init on GFX v9.4.3 For MQD init, an XCC's queue is selected with GRBM select. However, for initialization of MQD, values read from logical XCC0 registers are used. This results in garbage values being read from XCC0 whose queue is not selected. Change to read from the right XCC for MQD initialization. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 52041d4cb14a..1f1268cd5e09 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1456,7 +1456,7 @@ static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd } } -static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) +static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -1483,14 +1483,14 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_EOP_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); mqd->cp_hqd_eop_control = tmp; /* enable doorbell? 
*/ - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL); if (ring->use_doorbell) { tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, @@ -1520,7 +1520,7 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); /* set MQD vmid to 0 */ - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_MQD_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL); tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); mqd->cp_mqd_control = tmp; @@ -1530,7 +1530,7 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); /* set up the HQD, this is similar to CP_RB0_CNTL */ - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, (order_base_2(ring->ring_size / 4) - 1)); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, @@ -1557,23 +1557,23 @@ static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring) /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ ring->wptr = 0; - mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_RPTR); + mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR); /* set the vmid for the queue */ mqd->cp_hqd_vmid = 0; - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PERSISTENT_STATE); + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE); tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); mqd->cp_hqd_persistent_state = tmp; /* set MIN_IB_AVAIL_SIZE */ - tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_IB_CONTROL); + tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; /* set static priority for a queue/ring */ gfx_v9_4_3_mqd_set_priority(ring, mqd); - mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_QUANTUM); + mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM); /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. 
@@ -1771,7 +1771,7 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); - gfx_v9_4_3_mqd_init(ring); + gfx_v9_4_3_xcc_mqd_init(ring, xcc_id); gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id); soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); @@ -1802,7 +1802,7 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); - gfx_v9_4_3_mqd_init(ring); + gfx_v9_4_3_xcc_mqd_init(ring, xcc_id); soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); From 4482d3c94d7f1d6912521e6de23bb051bfcd084d Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Wed, 12 Oct 2022 21:58:29 -0400 Subject: [PATCH 415/861] drm/ttm: add NUMA node id to the pool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows backing ttm_tt structure with pages from different NUMA pools. Tested-by: Graham Sider Reviewed-by: Felix Kuehling Signed-off-by: Christian König Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_device.c | 2 +- drivers/gpu/drm/ttm/ttm_pool.c | 13 ++++++++----- include/drm/ttm/ttm_pool.h | 4 +++- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 64a59f46f6c3..43e27ab77f95 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -213,7 +213,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs, bdev->funcs = funcs; ttm_sys_man_init(bdev); - ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32); + ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32); bdev->vma_manager = vma_manager; spin_lock_init(&bdev->lru_lock); diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 18c342a919a2..afc4476c446f 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -98,7 +98,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, __GFP_KSWAPD_RECLAIM; if (!pool->use_dma_alloc) { - p = alloc_pages(gfp_flags, order); + p = alloc_pages_node(pool->nid, gfp_flags, order); if (p) p->private = order; return p; @@ -292,7 +292,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, enum ttm_caching caching, unsigned int order) { - if (pool->use_dma_alloc) + if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) return &pool->caching[caching].orders[order]; #ifdef CONFIG_X86 @@ -550,29 +550,32 @@ EXPORT_SYMBOL(ttm_pool_free); * * @pool: the pool to initialize * @dev: device for DMA allocations and mappings + * @nid: NUMA node to use for allocations * @use_dma_alloc: true if coherent DMA alloc should be used * @use_dma32: true if GFP_DMA32 should be used * * Initialize the pool and its pool types. 
*/ void ttm_pool_init(struct ttm_pool *pool, struct device *dev, - bool use_dma_alloc, bool use_dma32) + int nid, bool use_dma_alloc, bool use_dma32) { unsigned int i, j; WARN_ON(!dev && use_dma_alloc); pool->dev = dev; + pool->nid = nid; pool->use_dma_alloc = use_dma_alloc; pool->use_dma32 = use_dma32; - if (use_dma_alloc) { + if (use_dma_alloc || nid != NUMA_NO_NODE) { for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) for (j = 0; j < TTM_DIM_ORDER; ++j) ttm_pool_type_init(&pool->caching[i].orders[j], pool, i, j); } } +EXPORT_SYMBOL(ttm_pool_init); /** * ttm_pool_fini - Cleanup a pool @@ -586,7 +589,7 @@ void ttm_pool_fini(struct ttm_pool *pool) { unsigned int i, j; - if (pool->use_dma_alloc) { + if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) { for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) for (j = 0; j < TTM_DIM_ORDER; ++j) ttm_pool_type_fini(&pool->caching[i].orders[j]); diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h index ef09b23d29e3..23bd8be6d4f8 100644 --- a/include/drm/ttm/ttm_pool.h +++ b/include/drm/ttm/ttm_pool.h @@ -61,12 +61,14 @@ struct ttm_pool_type { * struct ttm_pool - Pool for all caching and orders * * @dev: the device we allocate pages for + * @nid: which numa node to use * @use_dma_alloc: if coherent DMA allocations should be used * @use_dma32: if GFP_DMA32 should be used * @caching: pools for each caching/order */ struct ttm_pool { struct device *dev; + int nid; bool use_dma_alloc; bool use_dma32; @@ -81,7 +83,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt); void ttm_pool_init(struct ttm_pool *pool, struct device *dev, - bool use_dma_alloc, bool use_dma32); + int nid, bool use_dma_alloc, bool use_dma32); void ttm_pool_fini(struct ttm_pool *pool); int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m); From 8c45a8340dd097ea0d6be6f718c4882283d9645d Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Tue, 24 Jan 2023 10:10:14 -0500 Subject: [PATCH 416/861] drm/amdkfd: Cleanup KFD nodes creation The kfd node allocation outside the kfd->num_nodes loop is not needed and causes a memory leak, because kfd->num_nodes is at least equal to 1.
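The leak pattern being removed, reduced to its essence (illustrative shape, not the exact driver code): the pointer from the pre-loop allocation is overwritten on the loop's first pass, so that first allocation can never be freed.

struct kfd_node *node;
int i;

node = kzalloc(sizeof(*node), GFP_KERNEL);	/* orphaned below... */
for (i = 0; i < kfd->num_nodes; i++) {		/* ...since num_nodes >= 1 */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		goto node_alloc_error;
	/* per-node initialization */
}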
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 7a963d0a34e2..d7cffd91f1d7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -729,26 +729,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, /* TODO: Needs to be updated for memory partitioning */ svm_migrate_init(kfd->adev); - /* Allocate the KFD node */ - node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); - if (!node) { - dev_err(kfd_device, "Error allocating KFD node\n"); - goto node_alloc_error; - } - - node->adev = kfd->adev; - node->kfd = kfd; - node->kfd2kgd = kfd->kfd2kgd; - node->vm_info.vmid_num_kfd = vmid_num_kfd; - node->vm_info.first_vmid_kfd = first_vmid_kfd; - node->vm_info.last_vmid_kfd = last_vmid_kfd; - node->max_proc_per_quantum = max_proc_per_quantum; - atomic_set(&node->sram_ecc_flag, 0); - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); + + /* Allocate the KFD nodes */ for (i = 0; i < kfd->num_nodes; i++) { node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); if (!node) From 1698e200e88db96aef7d16aa3d63df68a209ffbd Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 2 Feb 2023 11:10:08 -0500 Subject: [PATCH 417/861] drm/amdkfd: bind cpu and hiveless gpu to a hive if xgmi connected If a CPU and GPU are xGMI connected but the GPU is hiveless with respect to other GPUs, create a new CPU-GPU hive using the GPU's PCI device location ID as the new hive ID to maintain fine grain memory access usage. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5373a79ac6a1..c7072fff778e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1328,8 +1328,15 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) /* Include the CPU peer in GPU hive if connected over xGMI. */ if (!peer_dev->gpu && - link->iolink_type == CRAT_IOLINK_TYPE_XGMI) + link->iolink_type == CRAT_IOLINK_TYPE_XGMI) { + /* + * If the GPU is not part of a GPU hive, use its pci + * device location as the hive ID to bind with the CPU. 
+ */ + if (!dev->node_props.hive_id) + dev->node_props.hive_id = pci_dev_id(dev->gpu->adev->pdev); peer_dev->node_props.hive_id = dev->node_props.hive_id; + } list_for_each_entry(inbound_link, &peer_dev->io_link_props, list) { From 7b08b2e1caa04757bc7a35a46b3d5c83b1748423 Mon Sep 17 00:00:00 2001 From: Sonny Jiang Date: Tue, 31 Jan 2023 16:44:28 -0500 Subject: [PATCH 418/861] drm/amdgpu: add vcn_4_0_3 codec query Add support for the vcn_4_0_3 video codec query. Signed-off-by: Sonny Jiang Reviewed-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4138d9634266..5a63ec441a5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -153,6 +153,24 @@ static const struct amdgpu_video_codecs rn_video_codecs_decode = .codec_array = rn_video_codecs_decode_array, }; +static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_decode = { + .codec_count = ARRAY_SIZE(vcn_4_0_3_video_codecs_decode_array), + .codec_array = vcn_4_0_3_video_codecs_decode_array, +}; + +static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = { + .codec_count = 0, + .codec_array = NULL, +}; + static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { @@ -185,6 +203,12 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, else *codecs = &rn_video_codecs_decode; return 0; + case IP_VERSION(4, 0, 3): + if (encode) + *codecs = &vcn_4_0_3_video_codecs_encode; + else + *codecs = &vcn_4_0_3_video_codecs_decode; + return 0; default: return -EINVAL; } From 35ff4301ebc37bd45c18edae08afd2983dc9a338 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 3 Feb 2023 14:38:33 +0800 Subject: [PATCH 419/861] drm/amdgpu: enable context empty interrupt on sdma v4.4.2 With SDMA_CNTL.CTXEMPTY_INT_ENABLE set, the F32 clock can be gated when SDMA finishes all jobs and goes idle. No specific interrupt handling is required in the driver.
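The hunk below uses the standard read-modify-write idiom: REG_SET_FIELD() rewrites one named bitfield of SDMA_CNTL and leaves the other bits untouched. In isolation:

u32 temp;

temp = RREG32_SDMA(i, regSDMA_CNTL);
/* let the F32 clock gate itself once the queue context drains */
temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
WREG32_SDMA(i, regSDMA_CNTL, temp);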
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 729e26a4a2e7..7aa26e716a2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -890,6 +890,8 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev, /* set utc l1 enable flag always to 1 */ temp = RREG32_SDMA(i, regSDMA_CNTL); temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1); + /* enable context empty interrupt during initialization */ + temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1); WREG32_SDMA(i, regSDMA_CNTL, temp); if (!amdgpu_sriov_vf(adev)) { From cbf9e46ab0fec29ac39d05d9a87fa66122bc9783 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 3 Feb 2023 13:17:51 +0530 Subject: [PATCH 420/861] drm/amdgpu: Enable MGCG on SDMAv4.4.2 Enable clock gating on SDMAv4.4.2 versions. Leave memory light sleep to default. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 22 +++++++++++++--------- drivers/gpu/drm/amd/amdgpu/soc15.c | 4 ++-- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 7aa26e716a2d..dca0beec0252 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1683,7 +1683,11 @@ static void sdma_v4_4_2_inst_update_medium_grain_light_sleep( uint32_t data, def; int i; - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { + /* leave as default if it is not driver controlled */ + if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) + return; + + if (enable) { for_each_inst(i, inst_mask) { /* 1-not override: enable sdma mem light sleep */ def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL); @@ -1708,12 +1712,14 @@ static void sdma_v4_4_2_inst_update_medium_grain_clock_gating( uint32_t data, def; int i; - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { + /* leave as default if it is not driver controlled */ + if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) + return; + + if (enable) { for_each_inst(i, inst_mask) { def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL); - data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK | - SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK | - SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK | + data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK | SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK | SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK | SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK | @@ -1725,9 +1731,7 @@ static void sdma_v4_4_2_inst_update_medium_grain_clock_gating( } else { for_each_inst(i, inst_mask) { def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL); - data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK | - SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK | - SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK | + data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK | SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK | SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK | SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK | @@ -1773,7 +1777,7 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags) /* AMD_CG_SUPPORT_SDMA_MGCG */ data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_CLK_CTRL)); - if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK)) + if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK)) *flags |= AMD_CG_SUPPORT_SDMA_MGCG; /* AMD_CG_SUPPORT_SDMA_LS */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c index 5a63ec441a5b..100fdf5074b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1146,8 +1146,8 @@ static int soc15_common_early_init(void *handle) adev->asic_funcs = &aqua_vanjaram_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | - AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_VCN_MGCG | - AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | From 322a7e005db78b8a46ead91b7e3df3514cb658f0 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 2 Feb 2023 20:54:08 +0800 Subject: [PATCH 421/861] drm/amdgpu: Add common helper to query ras error (v2) Add a common helper to query ras error status and log error information, including memory block id and error count. The helpers are applicable to IP blocks that follow the new ras error logging design. For IP blocks that don't support the new design, please still implement an ip specific helper to query ras errors. v2: optimize struct amdgpu_ras_err_status_reg_entry and the implementation in the helper (Lijo/Tao) Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 119 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 54 +++++++++++ 2 files changed, 173 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 22f401fd1901..57e86af0c906 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -3103,3 +3103,122 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, return 0; } + +void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name) +{ + if (!err_type_name) + return; + + switch (err_type) { + case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE: + sprintf(err_type_name, "correctable"); + break; + case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE: + sprintf(err_type_name, "uncorrectable"); + break; + default: + sprintf(err_type_name, "unknown"); + break; + } +} + +bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev, + const struct amdgpu_ras_err_status_reg_entry *reg_entry, + uint32_t instance, + uint32_t *memory_id) +{ + uint32_t err_status_lo_data, err_status_lo_offset; + + if (!reg_entry) + return false; + + err_status_lo_offset = + AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance, + reg_entry->seg_lo, reg_entry->reg_lo); + err_status_lo_data = RREG32(err_status_lo_offset); + + if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) && + !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG)) + return false; + + *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID); + + return true; +} + +bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev, + const struct amdgpu_ras_err_status_reg_entry *reg_entry, + uint32_t instance, + unsigned long *err_cnt) +{ + uint32_t err_status_hi_data, err_status_hi_offset; + + if (!reg_entry) + return false; + + err_status_hi_offset = + AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance, + reg_entry->seg_hi, reg_entry->reg_hi); + err_status_hi_data = RREG32(err_status_hi_offset); + + if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) && + !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG)) + return false; + + /* read err count */ + *err_cnt =
REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT); + + return true; +} + +void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev, + const struct amdgpu_ras_err_status_reg_entry *reg_list, + uint32_t reg_list_size, + const struct amdgpu_ras_memory_id_entry *mem_list, + uint32_t mem_list_size, + uint32_t instance, + uint32_t err_type, + unsigned long *err_count) +{ + uint32_t memory_id; + unsigned long err_cnt; + char err_type_name[16]; + uint32_t i, j; + + for (i = 0; i < reg_list_size; i++) { + /* query err_cnt from err_status_hi */ + if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i], + instance, &err_cnt) || + !err_cnt) + continue; + + /* query memory_id from err_status_lo */ + if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i], + instance, &memory_id)) + continue; + + *err_count += err_cnt; + + /* log the errors */ + amdgpu_ras_get_error_type_name(err_type, err_type_name); + if (!mem_list) { + /* memory_list is not supported */ + dev_info(adev->dev, + "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n", + err_cnt, err_type_name, + reg_list[i].block_name, + instance, memory_id); + } else { + for (j = 0; j < mem_list_size; j++) { + if (memory_id == mem_list[j].memory_id) { + dev_info(adev->dev, + "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n", + err_cnt, err_type_name, + reg_list[i].block_name, + instance, mem_list[j].name); + break; + } + } + } + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 17b3d1992e80..c820af7f1a4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -314,6 +314,43 @@ enum amdgpu_ras_ret { AMDGPU_RAS_PT, }; +/* ras error status register fields */ +#define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +#define ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define ERR_STATUS__ERR_CNT__SHIFT 0x17 +#define ERR_STATUS__ERR_CNT_MASK 0x03800000L + +#define AMDGPU_RAS_REG_ENTRY(ip, inst, reg_lo, reg_hi) \ + ip##_HWIP, inst, reg_lo##_BASE_IDX, reg_lo, reg_hi##_BASE_IDX, reg_hi + +#define AMDGPU_RAS_REG_ENTRY_OFFSET(hwip, ip_inst, segment, reg) \ + (adev->reg_offset[hwip][ip_inst][segment] + (reg)) + +#define AMDGPU_RAS_ERR_INFO_VALID (1 << 0) +#define AMDGPU_RAS_ERR_STATUS_VALID (1 << 1) +#define AMDGPU_RAS_ERR_ADDRESS_VALID (1 << 2) + +struct amdgpu_ras_err_status_reg_entry { + uint32_t hwip; + uint32_t ip_inst; + uint32_t seg_lo; + uint32_t reg_lo; + uint32_t seg_hi; + uint32_t reg_hi; + uint32_t reg_inst; + uint32_t flags; + const char *block_name; +}; + +struct amdgpu_ras_memory_id_entry { + uint32_t memory_id; + const char *name; +}; + struct ras_common_if { enum amdgpu_ras_block block; enum amdgpu_ras_error_type type; @@ -696,4 +733,21 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object *ras_block_obj); void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev); +void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name); +bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev, + const struct amdgpu_ras_err_status_reg_entry *reg_entry, + uint32_t instance, + uint32_t *memory_id); +bool
amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev, + const struct amdgpu_ras_err_status_reg_entry *reg_entry, + uint32_t instance, + unsigned long *err_cnt); +void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev, + const struct amdgpu_ras_err_status_reg_entry *reg_list, + uint32_t reg_list_size, + const struct amdgpu_ras_memory_id_entry *mem_list, + uint32_t mem_list_size, + uint32_t instance, + uint32_t err_type, + unsigned long *err_count); #endif From e53a3250f76b8a0dd5b533bd0ce0dc821055e77d Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 3 Feb 2023 16:10:37 +0800 Subject: [PATCH 422/861] drm/amdgpu: Add common helper to reset ras error Add common helper to reset ras error status. It applies to IP blocks that follow the new ras error logging register design, and need to write 0 to reset the error status. For IP blocks that don't support the new design, please still implement ip specific helper. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 20 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 4 ++++ 2 files changed, 24 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 57e86af0c906..8a16a06cb78a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -3222,3 +3222,23 @@ void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev, } } } + +void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, + const struct amdgpu_ras_err_status_reg_entry *reg_list, + uint32_t reg_list_size, + uint32_t instance) +{ + uint32_t err_status_lo_offset, err_status_hi_offset; + uint32_t i; + + for (i = 0; i < reg_list_size; i++) { + err_status_lo_offset = + AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance, + reg_list[i].seg_lo, reg_list[i].reg_lo); + err_status_hi_offset = + AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance, + reg_list[i].seg_hi, reg_list[i].reg_hi); + WREG32(err_status_lo_offset, 0); + WREG32(err_status_hi_offset, 0); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index c820af7f1a4b..e96333d0c269 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -750,4 +750,8 @@ void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev, uint32_t instance, uint32_t err_type, unsigned long *err_count); +void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, + const struct amdgpu_ras_err_status_reg_entry *reg_list, + uint32_t reg_list_size, + uint32_t instance); #endif From d90d90a1978af6530c7d8b201c4ab117d0506b1a Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 23 Dec 2022 15:54:43 +0800 Subject: [PATCH 423/861] drm/amdgpu: Add sdma v4_4_2 ras registers SDMA_UE_ERR_STATUS_HI|LO are introduced in v4_4_2 to replace SDMA_EDC_COUNTER/COUNTER2 registers to log SDMA RAS errors Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- .../include/asic_reg/sdma/sdma_4_4_2_offset.h | 4 ++++ .../asic_reg/sdma/sdma_4_4_2_sh_mask.h | 24 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h index 31bef0776ded..ead81aeffd67 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h +++ 
b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h @@ -211,6 +211,10 @@ #define regSDMA_RAS_STATUS_BASE_IDX 0 #define regSDMA_CLK_STATUS 0x0068 #define regSDMA_CLK_STATUS_BASE_IDX 0 +#define regSDMA_UE_ERR_STATUS_LO 0x0069 +#define regSDMA_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regSDMA_UE_ERR_STATUS_HI 0x006a +#define regSDMA_UE_ERR_STATUS_HI_BASE_IDX 0 #define regSDMA_POWER_CNTL 0x006b #define regSDMA_POWER_CNTL_BASE_IDX 0 #define regSDMA_CLK_CTRL 0x006c diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h index e46cb3339355..290953bdf1d6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h @@ -1171,6 +1171,30 @@ #define SDMA_CLK_STATUS__F32_CLK_MASK 0x00000008L #define SDMA_CLK_STATUS__CE_CLK_MASK 0x00000010L #define SDMA_CLK_STATUS__PERF_CLK_MASK 0x00000020L +//SDMA_UE_ERR_STATUS_LO +#define SDMA_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SDMA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define SDMA_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SDMA_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SDMA_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SDMA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SDMA_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SDMA_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SDMA_UE_ERR_STATUS_HI +#define SDMA_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SDMA_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define SDMA_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SDMA_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SDMA_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define SDMA_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define SDMA_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define SDMA_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SDMA_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define SDMA_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SDMA_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SDMA_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define SDMA_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define SDMA_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L //SDMA_POWER_CNTL #define SDMA_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0 #define SDMA_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1 From dc37a9194ad20c4f09f22ec79cc2b5e5eb57c5a2 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 5 Feb 2023 22:54:50 +0800 Subject: [PATCH 424/861] drm/amdgpu: Add query_ras_error_count for sdma v4_4_2 Add query_ras_error_count callback for sdma v4_4_2. It will be used to query and log sdma uncorrectable error count and memory block. 
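Usage amounts to describing each LO/HI status register pair once and letting the common helper added earlier in the series do the reads, decoding, and logging. A hedged sketch of the call shape; my_ue_reg_list is a placeholder, and the concrete tables follow in the diff below:

static const struct amdgpu_ras_err_status_reg_entry my_ue_reg_list[] = {
	/* one entry per UE status register pair exposed by the IP block */
	{AMDGPU_RAS_REG_ENTRY(SDMA0, 0, regSDMA_UE_ERR_STATUS_LO,
			      regSDMA_UE_ERR_STATUS_HI),
	 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SDMA"},
};

/* accumulate uncorrectable counts for one instance into err_data */
amdgpu_ras_inst_query_ras_error_count(adev,
				      my_ue_reg_list,
				      ARRAY_SIZE(my_ue_reg_list),
				      NULL, 0,	/* no memory-id name table */
				      instance,
				      AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
				      &err_data->ue_count);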
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 28 +++++++++++ drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 64 ++++++++++++++++++++++++ 2 files changed, 92 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 62afb282a3ff..513ac22120c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -62,6 +62,34 @@ struct amdgpu_sdma_instance { uint32_t aid_id; }; +enum amdgpu_sdma_ras_memory_id { + AMDGPU_SDMA_MBANK_DATA_BUF0 = 1, + AMDGPU_SDMA_MBANK_DATA_BUF1 = 2, + AMDGPU_SDMA_MBANK_DATA_BUF2 = 3, + AMDGPU_SDMA_MBANK_DATA_BUF3 = 4, + AMDGPU_SDMA_MBANK_DATA_BUF4 = 5, + AMDGPU_SDMA_MBANK_DATA_BUF5 = 6, + AMDGPU_SDMA_MBANK_DATA_BUF6 = 7, + AMDGPU_SDMA_MBANK_DATA_BUF7 = 8, + AMDGPU_SDMA_MBANK_DATA_BUF8 = 9, + AMDGPU_SDMA_MBANK_DATA_BUF9 = 10, + AMDGPU_SDMA_MBANK_DATA_BUF10 = 11, + AMDGPU_SDMA_MBANK_DATA_BUF11 = 12, + AMDGPU_SDMA_MBANK_DATA_BUF12 = 13, + AMDGPU_SDMA_MBANK_DATA_BUF13 = 14, + AMDGPU_SDMA_MBANK_DATA_BUF14 = 15, + AMDGPU_SDMA_MBANK_DATA_BUF15 = 16, + AMDGPU_SDMA_UCODE_BUF = 17, + AMDGPU_SDMA_RB_CMD_BUF = 18, + AMDGPU_SDMA_IB_CMD_BUF = 19, + AMDGPU_SDMA_UTCL1_RD_FIFO = 20, + AMDGPU_SDMA_UTCL1_RDBST_FIFO = 21, + AMDGPU_SDMA_UTCL1_WR_FIFO = 22, + AMDGPU_SDMA_DATA_LUT_FIFO = 23, + AMDGPU_SDMA_SPLIT_DAT_BUF = 24, + AMDGPU_SDMA_MEMORY_BLOCK_LAST, +}; + struct amdgpu_sdma_ras { struct amdgpu_ras_block_object ras_block; }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index dca0beec0252..925ca6218a5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -2071,3 +2071,67 @@ struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs = { .suspend = &sdma_v4_4_2_xcp_suspend, .resume = &sdma_v4_4_2_xcp_resume }; + +static const struct amdgpu_ras_err_status_reg_entry sdma_v4_2_2_ue_reg_list[] = { + {AMDGPU_RAS_REG_ENTRY(SDMA0, 0, regSDMA_UE_ERR_STATUS_LO, regSDMA_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SDMA"}, +}; + +static const struct amdgpu_ras_memory_id_entry sdma_v4_4_2_ras_memory_list[] = { + {AMDGPU_SDMA_MBANK_DATA_BUF0, "SDMA_MBANK_DATA_BUF0"}, + {AMDGPU_SDMA_MBANK_DATA_BUF1, "SDMA_MBANK_DATA_BUF1"}, + {AMDGPU_SDMA_MBANK_DATA_BUF2, "SDMA_MBANK_DATA_BUF2"}, + {AMDGPU_SDMA_MBANK_DATA_BUF3, "SDMA_MBANK_DATA_BUF3"}, + {AMDGPU_SDMA_MBANK_DATA_BUF4, "SDMA_MBANK_DATA_BUF4"}, + {AMDGPU_SDMA_MBANK_DATA_BUF5, "SDMA_MBANK_DATA_BUF5"}, + {AMDGPU_SDMA_MBANK_DATA_BUF6, "SDMA_MBANK_DATA_BUF6"}, + {AMDGPU_SDMA_MBANK_DATA_BUF7, "SDMA_MBANK_DATA_BUF7"}, + {AMDGPU_SDMA_MBANK_DATA_BUF8, "SDMA_MBANK_DATA_BUF8"}, + {AMDGPU_SDMA_MBANK_DATA_BUF9, "SDMA_MBANK_DATA_BUF9"}, + {AMDGPU_SDMA_MBANK_DATA_BUF10, "SDMA_MBANK_DATA_BUF10"}, + {AMDGPU_SDMA_MBANK_DATA_BUF11, "SDMA_MBANK_DATA_BUF11"}, + {AMDGPU_SDMA_MBANK_DATA_BUF12, "SDMA_MBANK_DATA_BUF12"}, + {AMDGPU_SDMA_MBANK_DATA_BUF13, "SDMA_MBANK_DATA_BUF13"}, + {AMDGPU_SDMA_MBANK_DATA_BUF14, "SDMA_MBANK_DATA_BUF14"}, + {AMDGPU_SDMA_MBANK_DATA_BUF15, "SDMA_MBANK_DATA_BUF15"}, + {AMDGPU_SDMA_UCODE_BUF, "SDMA_UCODE_BUF"}, + {AMDGPU_SDMA_RB_CMD_BUF, "SDMA_RB_CMD_BUF"}, + {AMDGPU_SDMA_IB_CMD_BUF, "SDMA_IB_CMD_BUF"}, + {AMDGPU_SDMA_UTCL1_RD_FIFO, "SDMA_UTCL1_RD_FIFO"}, + {AMDGPU_SDMA_UTCL1_RDBST_FIFO, "SDMA_UTCL1_RDBST_FIFO"}, + {AMDGPU_SDMA_UTCL1_WR_FIFO, "SDMA_UTCL1_WR_FIFO"}, + {AMDGPU_SDMA_DATA_LUT_FIFO, "SDMA_DATA_LUT_FIFO"}, + {AMDGPU_SDMA_SPLIT_DAT_BUF, 
"SDMA_SPLIT_DAT_BUF"}, +}; + +static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev, + uint32_t sdma_inst, + void *ras_err_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status; + + /* sdma v4_4_2 doesn't support query ce counts */ + amdgpu_ras_inst_query_ras_error_count(adev, + sdma_v4_2_2_ue_reg_list, + ARRAY_SIZE(sdma_v4_2_2_ue_reg_list), + sdma_v4_4_2_ras_memory_list, + ARRAY_SIZE(sdma_v4_4_2_ras_memory_list), + sdma_inst, + AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, + &err_data->ue_count); +} + +static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev, + void *ras_err_status) +{ + uint32_t inst_mask; + int i = 0; + + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for_each_inst(i, inst_mask) + sdma_v4_4_2_inst_query_ras_error_count(adev, i, ras_err_status); + } else { + dev_warn(adev->dev, "SDMA RAS is not supported\n"); + } +} From a64b15520cc3a19bc6ca807870b51b37774a4d3d Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 22 Jan 2023 12:19:57 +0800 Subject: [PATCH 425/861] drm/amdgpu: Add reset_ras_error_count for sdma v4_4_2 Add reset_ras_error_count callback for sdma v4_4_2. It will be used to reset sdma ras error count. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 925ca6218a5e..f0333822df78 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -2135,3 +2135,26 @@ static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev, dev_warn(adev->dev, "SDMA RAS is not supported\n"); } } + +static void sdma_v4_4_2_inst_reset_ras_error_count(struct amdgpu_device *adev, + uint32_t sdma_inst) +{ + amdgpu_ras_inst_reset_ras_error_count(adev, + sdma_v4_2_2_ue_reg_list, + ARRAY_SIZE(sdma_v4_2_2_ue_reg_list), + sdma_inst); +} + +static void sdma_v4_4_2_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t inst_mask; + int i = 0; + + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for_each_inst(i, inst_mask) + sdma_v4_4_2_inst_reset_ras_error_count(adev, i); + } else { + dev_warn(adev->dev, "SDMA RAS is not supported\n"); + } +} From 1e69fde70030e2fd2b729de5998d3fd6c94be238 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 22 Jan 2023 23:29:28 +0800 Subject: [PATCH 426/861] drm/amdgpu: Initialize sdma v4_4_2 ras function Initialize sdma v4_4_2 ras function and interrupt handler. 
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 48 +++++++++++++++++++----- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index f0333822df78..570ea68c521f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -54,6 +54,7 @@ static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev); +static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev); static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 offset) @@ -1254,6 +1255,7 @@ static int sdma_v4_4_2_early_init(void *handle) sdma_v4_4_2_set_buffer_funcs(adev); sdma_v4_4_2_set_vm_pte_funcs(adev); sdma_v4_4_2_set_irq_funcs(adev); + sdma_v4_4_2_set_ras_funcs(adev); return 0; } @@ -1377,6 +1379,11 @@ static int sdma_v4_4_2_sw_init(void *handle) } } + if (amdgpu_sdma_ras_sw_init(adev)) { + dev_err(adev->dev, "fail to initialize sdma ras block\n"); + return -EINVAL; + } + return r; } @@ -1558,7 +1565,7 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev, * be disabled and the driver should only look for the aggregated * interrupt via sync flood */ - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) goto out; instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); @@ -1597,15 +1604,22 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 sdma_edc_config; + u32 sdma_cntl; - sdma_edc_config = RREG32_SDMA(type, regCC_SDMA_EDC_CONFIG); - /* - * FIXME: This was inherited from Aldebaran, but no this field - * definition in the regspec of both Aldebaran and SDMA 4.4.2 - */ - sdma_edc_config |= (state == AMDGPU_IRQ_STATE_ENABLE) ? 
(1 << 2) : 0; - WREG32_SDMA(type, regCC_SDMA_EDC_CONFIG, sdma_edc_config); + sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL); + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, + DRAM_ECC_INT_ENABLE, 0); + WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl); + break; + /* sdma ecc interrupt is enabled by default + * driver doesn't need to do anything to + * enable the interrupt */ + case AMDGPU_IRQ_STATE_ENABLE: + default: + break; + } return 0; } @@ -2158,3 +2172,19 @@ static void sdma_v4_4_2_reset_ras_error_count(struct amdgpu_device *adev) dev_warn(adev->dev, "SDMA RAS is not supported\n"); } } + +static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = { + .query_ras_error_count = sdma_v4_4_2_query_ras_error_count, + .reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count, +}; + +static struct amdgpu_sdma_ras sdma_v4_4_2_ras = { + .ras_block = { + .hw_ops = &sdma_v4_4_2_ras_hw_ops, + }, +}; + +static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev) +{ + adev->sdma.ras = &sdma_v4_4_2_ras; +} From 90cbee204e6619e47b1ec9fc14ebe03852585dac Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 28 Dec 2022 18:18:38 +0800 Subject: [PATCH 427/861] drm/amdgpu: Add mmhub v1_8_0 ras err status registers add new ras error status registers introduced in mmhub v1_8_0 to log mmea and mm_cane ras err, including MMEAx_UE|CE_ERR_STATUS_LO|HI MM_CANE_UE|CE_ERR_STATUS_LO|HI Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- .../asic_reg/mmhub/mmhub_1_8_0_offset.h | 56 ++- .../asic_reg/mmhub/mmhub_1_8_0_sh_mask.h | 325 +++++++++++++++++- 2 files changed, 373 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h index 8bcc81f2dfc0..879ee9de3ff3 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h @@ -1491,6 +1491,10 @@ #define regMMEA0_PERFCOUNTER1_CFG_BASE_IDX 0 #define regMMEA0_PERFCOUNTER_RSLT_CNTL 0x0400 #define regMMEA0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +#define regMMEA0_UE_ERR_STATUS_LO 0x0406 +#define regMMEA0_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regMMEA0_UE_ERR_STATUS_HI 0x0407 +#define regMMEA0_UE_ERR_STATUS_HI_BASE_IDX 0 #define regMMEA0_DSM_CNTL 0x0408 #define regMMEA0_DSM_CNTL_BASE_IDX 0 #define regMMEA0_DSM_CNTLA 0x0409 @@ -1511,8 +1515,12 @@ #define regMMEA0_ERR_STATUS_BASE_IDX 0 #define regMMEA0_MISC2 0x0412 #define regMMEA0_MISC2_BASE_IDX 0 +#define regMMEA0_CE_ERR_STATUS_LO 0x0414 +#define regMMEA0_CE_ERR_STATUS_LO_BASE_IDX 0 #define regMMEA0_MISC_AON 0x0415 #define regMMEA0_MISC_AON_BASE_IDX 0 +#define regMMEA0_CE_ERR_STATUS_HI 0x0416 +#define regMMEA0_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: aid_mmhub_ea_mmeadec1 @@ -1709,6 +1717,10 @@ #define regMMEA1_PERFCOUNTER1_CFG_BASE_IDX 0 #define regMMEA1_PERFCOUNTER_RSLT_CNTL 0x0540 #define regMMEA1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +#define regMMEA1_UE_ERR_STATUS_LO 0x0546 +#define regMMEA1_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regMMEA1_UE_ERR_STATUS_HI 0x0547 +#define regMMEA1_UE_ERR_STATUS_HI_BASE_IDX 0 #define regMMEA1_DSM_CNTL 0x0548 #define regMMEA1_DSM_CNTL_BASE_IDX 0 #define regMMEA1_DSM_CNTLA 0x0549 @@ -1729,8 +1741,12 @@ #define regMMEA1_ERR_STATUS_BASE_IDX 0 #define regMMEA1_MISC2 0x0552 #define regMMEA1_MISC2_BASE_IDX 0 +#define regMMEA1_CE_ERR_STATUS_LO 0x0554 +#define regMMEA1_CE_ERR_STATUS_LO_BASE_IDX 0 #define 
regMMEA1_MISC_AON 0x0555 #define regMMEA1_MISC_AON_BASE_IDX 0 +#define regMMEA1_CE_ERR_STATUS_HI 0x0556 +#define regMMEA1_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: aid_mmhub_ea_mmeadec2 @@ -1927,6 +1943,10 @@ #define regMMEA2_PERFCOUNTER1_CFG_BASE_IDX 0 #define regMMEA2_PERFCOUNTER_RSLT_CNTL 0x0680 #define regMMEA2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +#define regMMEA2_UE_ERR_STATUS_LO 0x0686 +#define regMMEA2_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regMMEA2_UE_ERR_STATUS_HI 0x0687 +#define regMMEA2_UE_ERR_STATUS_HI_BASE_IDX 0 #define regMMEA2_DSM_CNTL 0x0688 #define regMMEA2_DSM_CNTL_BASE_IDX 0 #define regMMEA2_DSM_CNTLA 0x0689 @@ -1947,8 +1967,12 @@ #define regMMEA2_ERR_STATUS_BASE_IDX 0 #define regMMEA2_MISC2 0x0692 #define regMMEA2_MISC2_BASE_IDX 0 +#define regMMEA2_CE_ERR_STATUS_LO 0x0694 +#define regMMEA2_CE_ERR_STATUS_LO_BASE_IDX 0 #define regMMEA2_MISC_AON 0x0695 #define regMMEA2_MISC_AON_BASE_IDX 0 +#define regMMEA2_CE_ERR_STATUS_HI 0x0696 +#define regMMEA2_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: aid_mmhub_ea_mmeadec3 @@ -2145,6 +2169,10 @@ #define regMMEA3_PERFCOUNTER1_CFG_BASE_IDX 0 #define regMMEA3_PERFCOUNTER_RSLT_CNTL 0x07c0 #define regMMEA3_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +#define regMMEA3_UE_ERR_STATUS_LO 0x07c6 +#define regMMEA3_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regMMEA3_UE_ERR_STATUS_HI 0x07c7 +#define regMMEA3_UE_ERR_STATUS_HI_BASE_IDX 0 #define regMMEA3_DSM_CNTL 0x07c8 #define regMMEA3_DSM_CNTL_BASE_IDX 0 #define regMMEA3_DSM_CNTLA 0x07c9 @@ -2165,9 +2193,12 @@ #define regMMEA3_ERR_STATUS_BASE_IDX 0 #define regMMEA3_MISC2 0x07d2 #define regMMEA3_MISC2_BASE_IDX 0 +#define regMMEA3_CE_ERR_STATUS_LO 0x07d4 +#define regMMEA3_CE_ERR_STATUS_LO_BASE_IDX 0 #define regMMEA3_MISC_AON 0x07d5 #define regMMEA3_MISC_AON_BASE_IDX 0 - +#define regMMEA3_CE_ERR_STATUS_HI 0x07d6 +#define regMMEA3_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: aid_mmhub_ea_mmeadec4 // base address: 0x62000 @@ -2363,6 +2394,10 @@ #define regMMEA4_PERFCOUNTER1_CFG_BASE_IDX 0 #define regMMEA4_PERFCOUNTER_RSLT_CNTL 0x0900 #define regMMEA4_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +#define regMMEA4_UE_ERR_STATUS_LO 0x0906 +#define regMMEA4_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regMMEA4_UE_ERR_STATUS_HI 0x0907 +#define regMMEA4_UE_ERR_STATUS_HI_BASE_IDX 0 #define regMMEA4_DSM_CNTL 0x0908 #define regMMEA4_DSM_CNTL_BASE_IDX 0 #define regMMEA4_DSM_CNTLA 0x0909 @@ -2383,9 +2418,12 @@ #define regMMEA4_ERR_STATUS_BASE_IDX 0 #define regMMEA4_MISC2 0x0912 #define regMMEA4_MISC2_BASE_IDX 0 +#define regMMEA4_CE_ERR_STATUS_LO 0x0914 +#define regMMEA4_CE_ERR_STATUS_LO_BASE_IDX 0 #define regMMEA4_MISC_AON 0x0915 #define regMMEA4_MISC_AON_BASE_IDX 0 - +#define regMMEA4_CE_ERR_STATUS_HI 0x0916 +#define regMMEA4_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: aid_mmhub_pctldec0 // base address: 0x62a00 @@ -3310,5 +3348,19 @@ #define regL2TLB_PERFCOUNTER_HI 0x0d2d #define regL2TLB_PERFCOUNTER_HI_BASE_IDX 0 +// addressBlock: aid_mmhub_mm_cane_mmcanedec +// base address: 0x635f0 +#define regMM_CANE_ICG_CTRL 0x0d8a +#define regMM_CANE_ICG_CTRL_BASE_IDX 0 +#define regMM_CANE_ERR_STATUS 0x0d8c +#define regMM_CANE_ERR_STATUS_BASE_IDX 0 +#define regMM_CANE_UE_ERR_STATUS_LO 0x0d8d +#define regMM_CANE_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regMM_CANE_UE_ERR_STATUS_HI 0x0d8e +#define regMM_CANE_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regMM_CANE_CE_ERR_STATUS_LO 0x0d8f +#define regMM_CANE_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regMM_CANE_CE_ERR_STATUS_HI 0x0d90 +#define regMM_CANE_CE_ERR_STATUS_HI_BASE_IDX 0 #endif diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h index af41468ce69f..088c1f02aa43 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h @@ -10470,6 +10470,30 @@ #define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L #define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L #define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA0_UE_ERR_STATUS_LO +#define MMEA0_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA0_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA0_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA0_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA0_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA0_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA0_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA0_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//MMEA0_UE_ERR_STATUS_HI +#define MMEA0_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA0_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define MMEA0_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA0_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA0_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define MMEA0_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define MMEA0_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d +#define MMEA0_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA0_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define MMEA0_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA0_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA0_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define MMEA0_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define MMEA0_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L //MMEA0_DSM_CNTL #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -10718,12 +10742,35 @@ #define MMEA0_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L #define MMEA0_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L #define MMEA0_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L +//MMEA0_CE_ERR_STATUS_LO +#define MMEA0_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA0_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA0_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA0_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA0_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA0_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA0_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA0_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L //MMEA0_MISC_AON #define MMEA0_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0 #define MMEA0_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2 #define MMEA0_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L #define MMEA0_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L - +//MMEA0_CE_ERR_STATUS_HI +#define MMEA0_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA0_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1 +#define MMEA0_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA0_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA0_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define MMEA0_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define MMEA0_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b +#define MMEA0_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA0_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 
0x00000002L +#define MMEA0_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA0_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA0_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define MMEA0_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define MMEA0_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L // addressBlock: aid_mmhub_ea_mmeadec1 //MMEA1_DRAM_RD_CLI2GRP_MAP0 @@ -12418,6 +12465,30 @@ #define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L #define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L #define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA1_UE_ERR_STATUS_LO +#define MMEA1_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA1_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA1_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA1_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA1_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA1_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA1_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA1_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//MMEA1_UE_ERR_STATUS_HI +#define MMEA1_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA1_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define MMEA1_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA1_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA1_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define MMEA1_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define MMEA1_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d +#define MMEA1_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA1_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define MMEA1_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA1_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA1_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define MMEA1_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define MMEA1_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L //MMEA1_DSM_CNTL #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -12666,12 +12737,35 @@ #define MMEA1_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L #define MMEA1_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L #define MMEA1_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L +//MMEA1_CE_ERR_STATUS_LO +#define MMEA1_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA1_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA1_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA1_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA1_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA1_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA1_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA1_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L //MMEA1_MISC_AON #define MMEA1_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0 #define MMEA1_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2 #define MMEA1_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L #define MMEA1_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L - +//MMEA1_CE_ERR_STATUS_HI +#define MMEA1_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA1_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1 +#define MMEA1_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA1_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA1_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define MMEA1_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define MMEA1_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b +#define 
MMEA1_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA1_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L +#define MMEA1_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA1_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA1_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define MMEA1_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define MMEA1_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L // addressBlock: aid_mmhub_ea_mmeadec2 //MMEA2_DRAM_RD_CLI2GRP_MAP0 @@ -14366,6 +14460,30 @@ #define MMEA2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L #define MMEA2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L #define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA2_UE_ERR_STATUS_LO +#define MMEA2_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA2_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA2_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA2_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA2_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA2_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//MMEA2_UE_ERR_STATUS_HI +#define MMEA2_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA2_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define MMEA2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA2_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA2_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define MMEA2_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define MMEA2_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d +#define MMEA2_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA2_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define MMEA2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA2_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA2_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define MMEA2_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define MMEA2_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L //MMEA2_DSM_CNTL #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -14614,12 +14732,35 @@ #define MMEA2_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L #define MMEA2_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L #define MMEA2_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L +//MMEA2_CE_ERR_STATUS_LO +#define MMEA2_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA2_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA2_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA2_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA2_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA2_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L //MMEA2_MISC_AON #define MMEA2_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0 #define MMEA2_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2 #define MMEA2_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L #define MMEA2_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L - +//MMEA2_CE_ERR_STATUS_HI +#define MMEA2_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA2_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1 +#define MMEA2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA2_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA2_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define MMEA2_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a 
+#define MMEA2_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b +#define MMEA2_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA2_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L +#define MMEA2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA2_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA2_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define MMEA2_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define MMEA2_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L // addressBlock: aid_mmhub_ea_mmeadec3 //MMEA3_DRAM_RD_CLI2GRP_MAP0 @@ -16314,6 +16455,30 @@ #define MMEA3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L #define MMEA3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L #define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA3_UE_ERR_STATUS_LO +#define MMEA3_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA3_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA3_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA3_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA3_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA3_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA3_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA3_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//MMEA3_UE_ERR_STATUS_HI +#define MMEA3_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA3_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define MMEA3_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA3_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA3_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define MMEA3_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define MMEA3_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d +#define MMEA3_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA3_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define MMEA3_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA3_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA3_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define MMEA3_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define MMEA3_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L //MMEA3_DSM_CNTL #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -16562,12 +16727,35 @@ #define MMEA3_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L #define MMEA3_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L #define MMEA3_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L +//MMEA3_CE_ERR_STATUS_LO +#define MMEA3_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA3_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA3_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA3_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA3_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA3_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA3_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA3_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L //MMEA3_MISC_AON #define MMEA3_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0 #define MMEA3_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2 #define MMEA3_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L #define MMEA3_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L - +//MMEA3_CE_ERR_STATUS_HI +#define MMEA3_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA3_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1 +#define MMEA3_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA3_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define 
MMEA3_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define MMEA3_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define MMEA3_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b +#define MMEA3_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA3_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L +#define MMEA3_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA3_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA3_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define MMEA3_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define MMEA3_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L // addressBlock: aid_mmhub_ea_mmeadec4 //MMEA4_DRAM_RD_CLI2GRP_MAP0 @@ -18262,6 +18450,30 @@ #define MMEA4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L #define MMEA4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L #define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA4_UE_ERR_STATUS_LO +#define MMEA4_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA4_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA4_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA4_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA4_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA4_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA4_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA4_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//MMEA4_UE_ERR_STATUS_HI +#define MMEA4_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA4_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define MMEA4_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA4_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA4_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define MMEA4_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define MMEA4_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d +#define MMEA4_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA4_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define MMEA4_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA4_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA4_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define MMEA4_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define MMEA4_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L //MMEA4_DSM_CNTL #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -18510,12 +18722,35 @@ #define MMEA4_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L #define MMEA4_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L #define MMEA4_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L +//MMEA4_CE_ERR_STATUS_LO +#define MMEA4_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define MMEA4_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MMEA4_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MMEA4_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MMEA4_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define MMEA4_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MMEA4_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MMEA4_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L //MMEA4_MISC_AON #define MMEA4_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0 #define MMEA4_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2 #define MMEA4_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L #define MMEA4_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L - +//MMEA4_CE_ERR_STATUS_HI +#define MMEA4_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MMEA4_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1 +#define 
MMEA4_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MMEA4_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MMEA4_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define MMEA4_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define MMEA4_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b +#define MMEA4_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MMEA4_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L +#define MMEA4_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MMEA4_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MMEA4_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define MMEA4_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define MMEA4_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L // addressBlock: aid_mmhub_pctldec0 //PCTL0_CTRL @@ -22311,5 +22546,83 @@ #define L2TLB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL #define L2TLB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L - +// addressBlock: aid_mmhub_mm_cane_mmcanedec +//MM_CANE_ICG_CTRL +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_IREQ0__SHIFT 0x0 +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_ATRET__SHIFT 0x1 +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_OREQ__SHIFT 0x2 +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x3 +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_SDPM_RETURN__SHIFT 0x4 +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_IREQ0_MASK 0x00000001L +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_ATRET_MASK 0x00000002L +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_OREQ_MASK 0x00000004L +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000008L +#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_SDPM_RETURN_MASK 0x00000010L +//MM_CANE_ERR_STATUS +#define MM_CANE_ERR_STATUS__SDPM_RDRSP_STATUS__SHIFT 0x0 +#define MM_CANE_ERR_STATUS__SDPM_WRRSP_STATUS__SHIFT 0x4 +#define MM_CANE_ERR_STATUS__SDPM_RDRSP_DATASTATUS__SHIFT 0x8 +#define MM_CANE_ERR_STATUS__SDPM_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define MM_CANE_ERR_STATUS__SDPS_DAT_ERROR__SHIFT 0xb +#define MM_CANE_ERR_STATUS__SDPS_DAT_PARITY_ERROR__SHIFT 0xc +#define MM_CANE_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xd +#define MM_CANE_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xe +#define MM_CANE_ERR_STATUS__BUSY_ON_UER_ERROR__SHIFT 0xf +#define MM_CANE_ERR_STATUS__FUE_FLAG__SHIFT 0x10 +#define MM_CANE_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0x11 +#define MM_CANE_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x12 +#define MM_CANE_ERR_STATUS__SDPM_RDRSP_STATUS_MASK 0x0000000FL +#define MM_CANE_ERR_STATUS__SDPM_WRRSP_STATUS_MASK 0x000000F0L +#define MM_CANE_ERR_STATUS__SDPM_RDRSP_DATASTATUS_MASK 0x00000300L +#define MM_CANE_ERR_STATUS__SDPM_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define MM_CANE_ERR_STATUS__SDPS_DAT_ERROR_MASK 0x00000800L +#define MM_CANE_ERR_STATUS__SDPS_DAT_PARITY_ERROR_MASK 0x00001000L +#define MM_CANE_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00002000L +#define MM_CANE_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00004000L +#define MM_CANE_ERR_STATUS__BUSY_ON_UER_ERROR_MASK 0x00008000L +#define MM_CANE_ERR_STATUS__FUE_FLAG_MASK 0x00010000L +#define MM_CANE_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00020000L +#define MM_CANE_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00040000L +//MM_CANE_UE_ERR_STATUS_LO +#define MM_CANE_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define MM_CANE_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MM_CANE_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MM_CANE_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MM_CANE_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define MM_CANE_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MM_CANE_UE_ERR_STATUS_LO__ADDRESS_MASK 
0x00FFFFFCL +#define MM_CANE_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//MM_CANE_UE_ERR_STATUS_HI +#define MM_CANE_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MM_CANE_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define MM_CANE_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MM_CANE_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MM_CANE_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define MM_CANE_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define MM_CANE_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MM_CANE_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define MM_CANE_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MM_CANE_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MM_CANE_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define MM_CANE_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +//MM_CANE_CE_ERR_STATUS_LO +#define MM_CANE_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define MM_CANE_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define MM_CANE_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define MM_CANE_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define MM_CANE_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define MM_CANE_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define MM_CANE_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define MM_CANE_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//MM_CANE_CE_ERR_STATUS_HI +#define MM_CANE_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define MM_CANE_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define MM_CANE_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define MM_CANE_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define MM_CANE_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define MM_CANE_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define MM_CANE_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define MM_CANE_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define MM_CANE_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define MM_CANE_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L #endif From bc069d823bffd774294f5c3b12757a50fb726fd0 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 2 Feb 2023 21:00:39 +0800 Subject: [PATCH 428/861] drm/amdgpu: Add query_ras_error_count for mmhub v1_8 Add query_ras_error_count callback for mmhub v1_8. It will be used to query and log mmhub error count and memory block. 
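Conceptually, for each entry in the ce/ue register lists the common
helper reads the LO/HI status pair, checks the valid flag and
accumulates the counter field. A hedged sketch of one CE entry, with
lo/hi holding the register values and field names taken from the
mmhub 1.8.0 headers added earlier in this series:

	if (REG_GET_FIELD(lo, MMEA0_CE_ERR_STATUS_LO, STATUS_VALID_FLAG))
		*ce_count += REG_GET_FIELD(hi, MMEA0_CE_ERR_STATUS_HI, CE_CNT);

	/* e.g. hi = 0x01000000: CE_CNT occupies bits 25:23, so
	 * (0x01000000 & 0x03800000) >> 0x17 == 2 correctable errors */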
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 23 ++++++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 93 +++++++++++++++++++++++ 2 files changed, 116 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index d21bb6dae56e..1ca9d4ed8063 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -21,6 +21,29 @@ #ifndef __AMDGPU_MMHUB_H__ #define __AMDGPU_MMHUB_H__ +enum amdgpu_mmhub_ras_memory_id { + AMDGPU_MMHUB_WGMI_PAGEMEM = 0, + AMDGPU_MMHUB_RGMI_PAGEMEM = 1, + AMDGPU_MMHUB_WDRAM_PAGEMEM = 2, + AMDGPU_MMHUB_RDRAM_PAGEMEM = 3, + AMDGPU_MMHUB_WIO_CMDMEM = 4, + AMDGPU_MMHUB_RIO_CMDMEM = 5, + AMDGPU_MMHUB_WGMI_CMDMEM = 6, + AMDGPU_MMHUB_RGMI_CMDMEM = 7, + AMDGPU_MMHUB_WDRAM_CMDMEM = 8, + AMDGPU_MMHUB_RDRAM_CMDMEM = 9, + AMDGPU_MMHUB_MAM_DMEM0 = 10, + AMDGPU_MMHUB_MAM_DMEM1 = 11, + AMDGPU_MMHUB_MAM_DMEM2 = 12, + AMDGPU_MMHUB_MAM_DMEM3 = 13, + AMDGPU_MMHUB_WRET_TAGMEM = 19, + AMDGPU_MMHUB_RRET_TAGMEM = 20, + AMDGPU_MMHUB_WIO_DATAMEM = 21, + AMDGPU_MMHUB_WGMI_DATAMEM = 22, + AMDGPU_MMHUB_WDRAM_DATAMEM = 23, + AMDGPU_MMHUB_MEMORY_BLOCK_LAST, +}; + struct amdgpu_mmhub_ras { struct amdgpu_ras_block_object ras_block; }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index a8faf66b6878..11240ca5ad83 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -29,6 +29,7 @@ #include "soc15_common.h" #include "soc15.h" +#include "amdgpu_ras.h" #define regVM_L2_CNTL3_DEFAULT 0x80100007 #define regVM_L2_CNTL4_DEFAULT 0x000000c1 @@ -579,3 +580,95 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = { .set_clockgating = mmhub_v1_8_set_clockgating, .get_clockgating = mmhub_v1_8_get_clockgating, }; + +static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = { + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_CE_ERR_STATUS_LO, regMMEA0_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_CE_ERR_STATUS_LO, regMMEA1_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_CE_ERR_STATUS_LO, regMMEA2_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_CE_ERR_STATUS_LO, regMMEA3_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_CE_ERR_STATUS_LO, regMMEA4_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_CE_ERR_STATUS_LO, regMM_CANE_CE_ERR_STATUS_HI), + 1, 0, "MM_CANE"}, +}; + +static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ue_reg_list[] = { + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_UE_ERR_STATUS_LO, regMMEA0_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_UE_ERR_STATUS_LO, regMMEA1_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_UE_ERR_STATUS_LO, regMMEA2_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_UE_ERR_STATUS_LO, 
regMMEA3_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_UE_ERR_STATUS_LO, regMMEA4_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"}, + {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_UE_ERR_STATUS_LO, regMM_CANE_UE_ERR_STATUS_HI), + 1, 0, "MM_CANE"}, +}; + +static const struct amdgpu_ras_memory_id_entry mmhub_v1_8_ras_memory_list[] = { + {AMDGPU_MMHUB_WGMI_PAGEMEM, "MMEA_WGMI_PAGEMEM"}, + {AMDGPU_MMHUB_RGMI_PAGEMEM, "MMEA_RGMI_PAGEMEM"}, + {AMDGPU_MMHUB_WDRAM_PAGEMEM, "MMEA_WDRAM_PAGEMEM"}, + {AMDGPU_MMHUB_RDRAM_PAGEMEM, "MMEA_RDRAM_PAGEMEM"}, + {AMDGPU_MMHUB_WIO_CMDMEM, "MMEA_WIO_CMDMEM"}, + {AMDGPU_MMHUB_RIO_CMDMEM, "MMEA_RIO_CMDMEM"}, + {AMDGPU_MMHUB_WGMI_CMDMEM, "MMEA_WGMI_CMDMEM"}, + {AMDGPU_MMHUB_RGMI_CMDMEM, "MMEA_RGMI_CMDMEM"}, + {AMDGPU_MMHUB_WDRAM_CMDMEM, "MMEA_WDRAM_CMDMEM"}, + {AMDGPU_MMHUB_RDRAM_CMDMEM, "MMEA_RDRAM_CMDMEM"}, + {AMDGPU_MMHUB_MAM_DMEM0, "MMEA_MAM_DMEM0"}, + {AMDGPU_MMHUB_MAM_DMEM1, "MMEA_MAM_DMEM1"}, + {AMDGPU_MMHUB_MAM_DMEM2, "MMEA_MAM_DMEM2"}, + {AMDGPU_MMHUB_MAM_DMEM3, "MMEA_MAM_DMEM3"}, + {AMDGPU_MMHUB_WRET_TAGMEM, "MMEA_WRET_TAGMEM"}, + {AMDGPU_MMHUB_RRET_TAGMEM, "MMEA_RRET_TAGMEM"}, + {AMDGPU_MMHUB_WIO_DATAMEM, "MMEA_WIO_DATAMEM"}, + {AMDGPU_MMHUB_WGMI_DATAMEM, "MMEA_WGMI_DATAMEM"}, + {AMDGPU_MMHUB_WDRAM_DATAMEM, "MMEA_WDRAM_DATAMEM"}, +}; + +static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev, + uint32_t mmhub_inst, + void *ras_err_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status; + + amdgpu_ras_inst_query_ras_error_count(adev, + mmhub_v1_8_ce_reg_list, + ARRAY_SIZE(mmhub_v1_8_ce_reg_list), + mmhub_v1_8_ras_memory_list, + ARRAY_SIZE(mmhub_v1_8_ras_memory_list), + mmhub_inst, + AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, + &err_data->ce_count); + amdgpu_ras_inst_query_ras_error_count(adev, + mmhub_v1_8_ue_reg_list, + ARRAY_SIZE(mmhub_v1_8_ue_reg_list), + mmhub_v1_8_ras_memory_list, + ARRAY_SIZE(mmhub_v1_8_ras_memory_list), + mmhub_inst, + AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, + &err_data->ue_count); +} + +static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev, + void *ras_err_status) +{ + uint32_t inst_mask; + uint32_t i; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + dev_warn(adev->dev, "MMHUB RAS is not supported\n"); + return; + } + + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) + mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status); +} From a0cdb3d09332900e145fb99f52d1d571d7030183 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 22 Jan 2023 22:22:06 +0800 Subject: [PATCH 429/861] drm/amdgpu: Add reset_ras_error_count for mmhub v1_8 Add reset_ras_error_count callback for mmhub v1_8. It will be used to reset mmhub ras error count. 
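The per-entry effect of the common reset helper is assumed to be a
plain clear of each listed LO/HI status pair on the given instance,
roughly (a sketch based on the register layout, not a quote of
amdgpu_ras_inst_reset_ras_error_count()):

	WREG32(ce_err_status_lo_offset, 0);	/* clears the valid flags */
	WREG32(ce_err_status_hi_offset, 0);	/* zeroes CE_CNT / ERR_INFO */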
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 28 +++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 11240ca5ad83..f40a6a9f3c6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -672,3 +672,31 @@ static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev, for_each_inst(i, inst_mask) mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status); } + +static void mmhub_v1_8_inst_reset_ras_error_count(struct amdgpu_device *adev, + uint32_t mmhub_inst) +{ + amdgpu_ras_inst_reset_ras_error_count(adev, + mmhub_v1_8_ce_reg_list, + ARRAY_SIZE(mmhub_v1_8_ce_reg_list), + mmhub_inst); + amdgpu_ras_inst_reset_ras_error_count(adev, + mmhub_v1_8_ue_reg_list, + ARRAY_SIZE(mmhub_v1_8_ue_reg_list), + mmhub_inst); +} + +static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t inst_mask; + uint32_t i; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + dev_warn(adev->dev, "MMHUB RAS is not supported\n"); + return; + } + + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) + mmhub_v1_8_inst_reset_ras_error_count(adev, i); +} From 00c145222b6ac93ee9926a8ef2e41dae55b97b7e Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 22 Jan 2023 23:36:25 +0800 Subject: [PATCH 430/861] drm/amdgpu: Add query_ras_error_status for mmhub v1_8 Add query_ras_error_status callback for mmhub v1_8. It will be used to log mmhub error status. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 56 +++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index f40a6a9f3c6d..2923304e653a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -700,3 +700,59 @@ static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev) for_each_inst(i, inst_mask) mmhub_v1_8_inst_reset_ras_error_count(adev, i); } + +static const uint32_t mmhub_v1_8_mmea_err_status_reg[] = { + regMMEA0_ERR_STATUS, + regMMEA1_ERR_STATUS, + regMMEA2_ERR_STATUS, + regMMEA3_ERR_STATUS, + regMMEA4_ERR_STATUS, +}; + +static void mmhub_v1_8_inst_query_ras_err_status(struct amdgpu_device *adev, + uint32_t mmhub_inst) +{ + uint32_t reg_value; + uint32_t mmea_err_status_addr_dist; + uint32_t i; + + /* query mmea ras err status */ + mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS; + for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) { + reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, + regMMEA0_ERR_STATUS, + i * mmea_err_status_addr_dist); + if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) || + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) || + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { + dev_warn(adev->dev, + "Detected MMEA%d err in MMHUB%d, status: 0x%x\n", + i, mmhub_inst, reg_value); + } + } + + /* query mm_cane ras err status */ + reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS); + if (REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_STATUS) || + REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_WRRSP_STATUS) || + REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_DATAPARITY_ERROR)) { + dev_warn(adev->dev, + "Detected 
MM CANE err in MMHUB%d, status: 0x%x\n", + mmhub_inst, reg_value); + } +} + +static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev) +{ + uint32_t inst_mask; + uint32_t i; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + dev_warn(adev->dev, "MMHUB RAS is not supported\n"); + return; + } + + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) + mmhub_v1_8_inst_query_ras_err_status(adev, i); +} From ccfdbd4bdc0875ee8b8fe00691a6b5f883227bdb Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 22 Jan 2023 23:20:09 +0800 Subject: [PATCH 431/861] drm/amdgpu: Add reset_ras_error_status for mmhub v1_8 Add reset_ras_error_status callback for mmhub v1_8. It will be used to reset mmhub error status. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 91 +++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 2923304e653a..4f274c7db591 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -756,3 +756,94 @@ static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev) for_each_inst(i, inst_mask) mmhub_v1_8_inst_query_ras_err_status(adev, i); } + +static const uint32_t mmhub_v1_8_mmea_cgtt_clk_cntl_reg[] = { + regMMEA0_CGTT_CLK_CTRL, + regMMEA1_CGTT_CLK_CTRL, + regMMEA2_CGTT_CLK_CTRL, + regMMEA3_CGTT_CLK_CTRL, + regMMEA4_CGTT_CLK_CTRL, +}; + +static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev, + uint32_t mmhub_inst) +{ + uint32_t mmea_cgtt_clk_cntl_addr_dist; + uint32_t mmea_err_status_addr_dist; + uint32_t reg_value; + uint32_t i; + + /* reset mmea ras err status */ + mmea_cgtt_clk_cntl_addr_dist = regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL; + mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS; + for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i ++) { + /* force clk branch on for response path + * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1 */ + reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, + regMMEA0_CGTT_CLK_CTRL, + i * mmea_cgtt_clk_cntl_addr_dist); + reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL, + SOFT_OVERRIDE_RETURN, 1); + WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, + regMMEA0_CGTT_CLK_CTRL, + i * mmea_cgtt_clk_cntl_addr_dist, + reg_value); + + /* set MMEA0_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */ + reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, + regMMEA0_ERR_STATUS, + i * mmea_err_status_addr_dist); + reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS, + CLEAR_ERROR_STATUS, 1); + WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, + regMMEA0_ERR_STATUS, + i * mmea_err_status_addr_dist, + reg_value); + + /* set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 0 */ + reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, + regMMEA0_CGTT_CLK_CTRL, + i * mmea_cgtt_clk_cntl_addr_dist); + reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL, + SOFT_OVERRIDE_RETURN, 0); + WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, + regMMEA0_CGTT_CLK_CTRL, + i * mmea_cgtt_clk_cntl_addr_dist, + reg_value); + } + + /* reset mm_cane ras err status + * force clk branch on for response path + * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1 */ + reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL); + reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL, + SOFT_OVERRIDE_ATRET, 1); + WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value); + + /* set 
MM_CANE_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */ + reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS); + reg_value = REG_SET_FIELD(reg_value, MM_CANE_ERR_STATUS, + CLEAR_ERROR_STATUS, 1); + WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS, reg_value); + + /* set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 0 */ + reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL); + reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL, + SOFT_OVERRIDE_ATRET, 0); + WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value); +} + +static void mmhub_v1_8_reset_ras_error_status(struct amdgpu_device *adev) +{ + uint32_t inst_mask; + uint32_t i; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + dev_warn(adev->dev, "MMHUB RAS is not supported\n"); + return; + } + + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) + mmhub_v1_8_inst_reset_ras_err_status(adev, i); +} From 73c2b3fd2c515bcb819d801c5c4bf053fdb1e5cb Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 22 Jan 2023 23:26:40 +0800 Subject: [PATCH 432/861] drm/amdgpu: Initialize mmhub v1_8 ras function Initialize mmhub v1_8 ras function. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h | 1 + 3 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b6c500be6f70..16634a791e10 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1419,6 +1419,9 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) case IP_VERSION(9, 4, 2): adev->mmhub.ras = &mmhub_v1_7_ras; break; + case IP_VERSION(1, 8, 0): + adev->mmhub.ras = &mmhub_v1_8_ras; + break; default: /* mmhub ras is not available */ break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 4f274c7db591..3648994724c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -847,3 +847,16 @@ static void mmhub_v1_8_reset_ras_error_status(struct amdgpu_device *adev) for_each_inst(i, inst_mask) mmhub_v1_8_inst_reset_ras_err_status(adev, i); } + +static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = { + .query_ras_error_count = mmhub_v1_8_query_ras_error_count, + .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count, + .query_ras_error_status = mmhub_v1_8_query_ras_error_status, + .reset_ras_error_status = mmhub_v1_8_reset_ras_error_status, +}; + +struct amdgpu_mmhub_ras mmhub_v1_8_ras = { + .ras_block = { + .hw_ops = &mmhub_v1_8_ras_hw_ops, + }, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h index 0bb36200e4e5..126f0075ac50 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h @@ -24,5 +24,6 @@ #define __MMHUB_V1_8_H__ extern const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs; +extern struct amdgpu_mmhub_ras mmhub_v1_8_ras; #endif From 8107e4996f4ec3304485c608b5880185840c464c Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 29 Jan 2023 22:48:15 +0800 Subject: [PATCH 433/861] drm/amdgpu: Enable persistent edc harvesting in APP APU Persistent edc harvesting is supported in APP APU Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 8a16a06cb78a..5ae89602a116 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2624,7 +2624,8 @@ release_con: int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) { - if (adev->gmc.xgmi.connected_to_cpu) + if (adev->gmc.xgmi.connected_to_cpu || + adev->gmc.is_app_apu) return 1; return 0; } From 9faf929fbf6b457d13064d57017b2d4f62670b58 Mon Sep 17 00:00:00 2001 From: Asad kamal Date: Tue, 7 Feb 2023 20:55:24 +0800 Subject: [PATCH 434/861] drm/amdgpu: Enable CG for IH v4.4.2 Enable clock gating on IH v4.4.2 versions. Signed-off-by: Asad kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 100fdf5074b4..9fbfd0811d06 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1147,7 +1147,8 @@ static int soc15_common_early_init(void *handle) adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_IH_CG | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | From 228ce176434b0f61451019065393040d58e1668d Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Fri, 27 Jan 2023 21:57:00 -0500 Subject: [PATCH 435/861] drm/amdgpu: Handle VRAM dependencies on GFXIP9.4.3 [For 1P NPS1 mode driver bringup] Changes required to initialize the amdgpu driver with frontdoor firmware loading and discovery=2 with the native mode SBIOS that enables CPU GPU unified interleaved memory. sudo modprobe amdgpu discovery=2 Once PSP TMR region is reported via the ACPI interface, the dependency on the ip_discovery.bin will be removed. Choice of where to allocate driver table is given to each IP version. In general, both GTT and VRAM domains will be considered. If one of the tables has a strict restriction for VRAM domain, then only VRAM domain is considered. Reviewed-by: Felix Kuehling (lijo: Modified the handling for SMU Tables) Signed-off-by: Lijo Lazar Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 89 +++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 7 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 3 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 9 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6 ++ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 5 ++ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 10 ++- .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 6 +- 11 files changed, 99 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index af37f2ef4438..4e179e50de25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2292,8 +2292,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, (*mem)->dmabuf = dma_buf; (*mem)->bo = bo; (*mem)->va = va; - (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; + (*mem)->mapped_to_gpu_memory = 0; (*mem)->process_info = avm->process_info; add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9f0d5f02119e..f431205e1077 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1044,7 +1044,7 @@ static const char * const amdgpu_vram_names[] = { int amdgpu_bo_init(struct amdgpu_device *adev) { /* On A+A platform, VRAM can be mapped as WB */ - if (!adev->gmc.xgmi.connected_to_cpu) { + if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { /* reserve PAT memory space to WC for VRAM */ int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 863fa331e6ff..4395c53d09d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -476,7 +476,8 @@ static int psp_sw_init(void *handle) return ret; ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); @@ -484,7 +485,8 @@ static int psp_sw_init(void *handle) goto failed1; ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, (void **)&psp->cmd_buf_mem); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6bbe3b89aef5..bc11ae56bba5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1708,15 +1708,20 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; } - ret = amdgpu_bo_create_kernel_at(adev, - adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, - adev->mman.discovery_tmr_size, - &adev->mman.discovery_memory, - NULL); - if (ret) { - DRM_ERROR("alloc tmr failed(%d)!\n", ret); - amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); - return ret; + if (!adev->gmc.is_app_apu) { + ret = amdgpu_bo_create_kernel_at(adev, + adev->gmc.real_vram_size - + adev->mman.discovery_tmr_size, + adev->mman.discovery_tmr_size, + &adev->mman.discovery_memory, + NULL); + if (ret) { + DRM_ERROR("alloc tmr failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + return ret; + } + } else { + DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n"); } return 0; @@ -1765,10 +1770,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base, adev->gmc.visible_vram_size); - else + else if (!adev->gmc.is_app_apu) #endif adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, adev->gmc.visible_vram_size); + else + DRM_DEBUG_DRIVER("No need to ioremap when real vram size is 0\n"); #endif /* @@ -1803,23 +1810,32 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS * and driver. 
*/ - r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size, - &adev->mman.stolen_vga_memory, - NULL); - if (r) - return r; - r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size, - adev->mman.stolen_extended_size, - &adev->mman.stolen_extended_memory, - NULL); - if (r) - return r; - r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset, - adev->mman.stolen_reserved_size, - &adev->mman.stolen_reserved_memory, - NULL); - if (r) - return r; + if (!adev->gmc.is_app_apu) { + r = amdgpu_bo_create_kernel_at(adev, 0, + adev->mman.stolen_vga_size, + &adev->mman.stolen_vga_memory, + NULL); + if (r) + return r; + + r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size, + adev->mman.stolen_extended_size, + &adev->mman.stolen_extended_memory, + NULL); + + if (r) + return r; + + r = amdgpu_bo_create_kernel_at(adev, + adev->mman.stolen_reserved_offset, + adev->mman.stolen_reserved_size, + &adev->mman.stolen_reserved_memory, + NULL); + if (r) + return r; + } else { + DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n"); + } DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); @@ -1866,7 +1882,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_ERROR("Failed initializing oa heap.\n"); return r; } - if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->mman.sdma_access_bo, NULL, @@ -1887,13 +1902,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) amdgpu_ttm_training_reserve_vram_fini(adev); /* return the stolen vga memory back to VRAM */ - amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); - amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); - /* return the IP Discovery TMR memory back to VRAM */ - amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); - if (adev->mman.stolen_reserved_size) - amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, - NULL, NULL); + if (!adev->gmc.is_app_apu) { + amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); + /* return the IP Discovery TMR memory back to VRAM */ + amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + if (adev->mman.stolen_reserved_size) + amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, + NULL, NULL); + } amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, &adev->mman.sdma_access_ptr); amdgpu_ttm_fw_reserve_vram_fini(adev); @@ -1935,7 +1952,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) int r; if (!adev->mman.initialized || amdgpu_in_reset(adev) || - adev->mman.buffer_funcs_enabled == enable) + adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu) return; if (enable) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index df63dc3bca18..bc5d126b600b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -512,7 +512,12 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.size = amdgpu_vm_pt_size(adev, level); bp.byte_align = AMDGPU_GPU_PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + + if (!adev->gmc.is_app_apu) + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + else + bp.domain = AMDGPU_GEM_DOMAIN_GTT; + bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain); bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_CPU_GTT_USWC; diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 1f1268cd5e09..42877c4505f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -459,7 +459,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE; if (mec_hpd_size) { r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.mec.hpd_eop_obj, &adev->gfx.mec.hpd_eop_gpu_addr, (void **)&hpd); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 16634a791e10..245de27c7540 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1593,8 +1593,13 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) int r; /* size in MB on si */ - adev->gmc.mc_vram_size = - adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; + if (!adev->gmc.is_app_apu) { + adev->gmc.mc_vram_size = + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; + } else { + DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n"); + adev->gmc.mc_vram_size = 0; + } adev->gmc.real_vram_size = adev->gmc.mc_vram_size; if (!(adev->flags & AMD_IS_APU) && diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 8b9accecf49b..f85ac4dbc673 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1026,6 +1026,12 @@ bool kfd_dev_is_large_bar(struct kfd_node *dev) if (dev->kfd->local_mem_info.local_mem_size_private == 0 && dev->kfd->local_mem_info.local_mem_size_public > 0) return true; + + if (dev->kfd->local_mem_info.local_mem_size_public == 0 && dev->kfd->adev->gmc.is_app_apu) { + pr_debug("APP APU, Consider like a large bar system\n"); + return true; + } + return false; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 16475921587b..1aaf933f9f48 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -30,6 +30,9 @@ #include "amdgpu.h" #include "amdgpu_amdkfd.h" +/* Fixme: Fake 32GB for 1PNPS1 mode bringup */ +#define DUMMY_VRAM_SIZE 31138512896 + /* GPU Processor ID base for dGPUs for which VCRAT needs to be created. * GPU processor ID are expressed with Bit[31]=1. 
* The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs @@ -1053,6 +1056,8 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem, props->heap_type = heap_type; props->flags = flags; + if (size_in_bytes == 0) + size_in_bytes = DUMMY_VRAM_SIZE; /* Fixme: TBD */ props->size_in_bytes = size_in_bytes; props->width = width; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 2ddf5198e5c4..4dea79a0c5b5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -822,11 +822,20 @@ static int smu_init_fb_allocations(struct smu_context *smu) } } + driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT; /* VRAM allocation for driver table */ for (i = 0; i < SMU_TABLE_COUNT; i++) { if (tables[i].size == 0) continue; + /* If one of the tables has VRAM domain restriction, keep it in + * VRAM + */ + if ((tables[i].domain & + (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) == + AMDGPU_GEM_DOMAIN_VRAM) + driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; + if (i == SMU_TABLE_PMSTATUSLOG) continue; @@ -836,7 +845,6 @@ static int smu_init_fb_allocations(struct smu_context *smu) driver_table->size = max_table_size; driver_table->align = PAGE_SIZE; - driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; ret = amdgpu_bo_create_kernel(adev, driver_table->size, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index ea8f3d6fb98b..8969b3ff5c8f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -220,10 +220,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); if (!smu_table->metrics_table) From f431393d605f55f8865dbf8ba8236760fbb0a3dc Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Fri, 27 Jan 2023 21:46:59 -0500 Subject: [PATCH 436/861] drm/amdgpu: Implement new dummy vram manager This adds dummy vram manager to support ASICs that do not have a dedicated or carvedout vram domain. 
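With the dummy manager installed for TTM_PL_VRAM, every allocation attempt fails with -ENOSPC, so TTM falls through to the buffer's next allowed placement. A rough caller-side sketch of that effect (try_vram_then_gtt() is a hypothetical helper, not part of this patch):

static int try_vram_then_gtt(struct ttm_buffer_object *bo,
                             const struct ttm_place *vram_place,
                             const struct ttm_place *gtt_place,
                             struct ttm_resource **res)
{
        /* The dummy VRAM manager's alloc callback always returns -ENOSPC. */
        int r = ttm_resource_alloc(bo, vram_place, res);

        if (r == -ENOSPC)
                r = ttm_resource_alloc(bo, gtt_place, res);

        return r;
}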
Reviewed-by: Felix Kuehling Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 67 ++++++++++++++++++-- 1 file changed, 60 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 43d6a9d6a538..89d35d194f2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -370,6 +370,45 @@ out: return ret; } +static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man, + struct drm_printer *printer) +{ + DRM_DEBUG_DRIVER("Dummy vram mgr debug\n"); +} + +static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man, + struct ttm_resource *res, + const struct ttm_place *place, + size_t size) +{ + DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n"); + return false; +} + +static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man, + struct ttm_resource *res, + const struct ttm_place *place, + size_t size) +{ + DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n"); + return true; +} + +static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man, + struct ttm_resource *res) +{ + DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n"); +} + +static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_resource **res) +{ + DRM_DEBUG_DRIVER("Dummy vram mgr new\n"); + return -ENOSPC; +} + /** * amdgpu_vram_mgr_new - allocate new ranges * @@ -817,6 +856,14 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, mutex_unlock(&mgr->lock); } +static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = { + .alloc = amdgpu_dummy_vram_mgr_new, + .free = amdgpu_dummy_vram_mgr_del, + .intersects = amdgpu_dummy_vram_mgr_intersects, + .compatible = amdgpu_dummy_vram_mgr_compatible, + .debug = amdgpu_dummy_vram_mgr_debug +}; + static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { .alloc = amdgpu_vram_mgr_new, .free = amdgpu_vram_mgr_del, @@ -841,17 +888,22 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) ttm_resource_manager_init(man, &adev->mman.bdev, adev->gmc.real_vram_size); - man->func = &amdgpu_vram_mgr_func; - - err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); - if (err) - return err; - mutex_init(&mgr->lock); INIT_LIST_HEAD(&mgr->reservations_pending); INIT_LIST_HEAD(&mgr->reserved_pages); mgr->default_page_size = PAGE_SIZE; + if (!adev->gmc.is_app_apu) { + man->func = &amdgpu_vram_mgr_func; + + err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); + if (err) + return err; + } else { + man->func = &amdgpu_dummy_vram_mgr_func; + DRM_INFO("Setup dummy vram mgr\n"); + } + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); ttm_resource_manager_set_used(man, true); return 0; @@ -886,7 +938,8 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) drm_buddy_free_list(&mgr->mm, &rsv->allocated); kfree(rsv); } - drm_buddy_fini(&mgr->mm); + if (!adev->gmc.is_app_apu) + drm_buddy_fini(&mgr->mm); mutex_unlock(&mgr->lock); ttm_resource_manager_cleanup(man); From 970c1646b5ac93a13496d3429aca3e799fa6cf07 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Fri, 27 Jan 2023 21:48:06 -0500 Subject: [PATCH 437/861] drm/amdgpu: Create VRAM BOs on GTT for GFXIP9.4.3 On GFXIP 9.4.3 APP APUs, where there is no dedicated VRAM domain, handle VRAM BO allocation requests on the CPU domain and validate them on GTT.
Support for handling multi-socket and multi-NUMA partitions within a socket will be added by future patches; this enables the 1P NPS1 ASIC bring-up configuration. Reviewed-by: Felix Kuehling Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 4e179e50de25..bbdd5e3aa18e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1649,9 +1649,16 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( */ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; - alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; - alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? + + if (adev->gmc.is_app_apu) { + domain = AMDGPU_GEM_DOMAIN_GTT; + alloc_domain = AMDGPU_GEM_DOMAIN_CPU; + alloc_flags = 0; + } else { + alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; + alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; + } } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1738,6 +1745,13 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( (*mem)->domain = domain; (*mem)->mapped_to_gpu_memory = 0; (*mem)->process_info = avm->process_info; + + if (adev->gmc.is_app_apu && + ((*mem)->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) { + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; + } + add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); if (user_addr) { From bb0ed57b4450e46de3651b310dcb4273032c3122 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 16 Mar 2023 11:08:06 +0800 Subject: [PATCH 438/861] drm/amdgpu: increase AMDGPU_MAX_RINGS On newer GPUs, the number of kernel rings is increased. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index add7cc2831b0..d2b1a8854603 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -37,7 +37,7 @@ struct amdgpu_job; struct amdgpu_vm; /* max number of rings */ -#define AMDGPU_MAX_RINGS 102 +#define AMDGPU_MAX_RINGS 124 #define AMDGPU_MAX_HWIP_RINGS 64 #define AMDGPU_MAX_GFX_RINGS 2 #define AMDGPU_MAX_SW_GFX_RINGS 2 From 9e4216cf2dcccacd0e5dc932f26e35d18527ff41 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 5 May 2023 11:54:38 -0400 Subject: [PATCH 439/861] drm/amdgpu: Increase Max GPU instance to 64 Increase Max GPU instances to 64 to handle multi-socket systems with GFX 9.4.3 ASICs.
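For reference, MAX_GPU_INSTANCE bounds driver-wide bookkeeping such as the mGPU instance table and the per-range GPU bitmaps in KFD SVM; an abridged sketch of those existing consumers (field layout simplified):

struct amdgpu_mgpu_info {
        struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
        /* ... */
};

struct svm_range {
        /* ... */
        DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
        DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
};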
Signed-off-by: Mukul Joshi Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 279057ec7a0b..39743d44b567 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -108,7 +108,7 @@ #include "amdgpu_mca.h" #include "amdgpu_ras.h" -#define MAX_GPU_INSTANCE 16 +#define MAX_GPU_INSTANCE 64 struct amdgpu_gpu_instance { From 0c451baf3bff8e2a9fa45ef6471f9f25da00f39b Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 16 Mar 2023 17:42:49 +0800 Subject: [PATCH 440/861] drm/amdgpu: change the print level to warn for ip block disabled Avoid to mislead users as it's not a real error. Signed-off-by: Le Ma Reviewed-by: Asad Kamal Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c190365d67e2..7f62826fcaca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2292,7 +2292,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) total = true; for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { - DRM_ERROR("disabled ip block: %d <%s>\n", + DRM_WARN("disabled ip block: %d <%s>\n", i, adev->ip_blocks[i].version->funcs->name); adev->ip_blocks[i].status.valid = false; } else { From 1ad29cb3438175a9517f3b6cfeb0e331be213d8c Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Wed, 22 Mar 2023 11:16:53 +0800 Subject: [PATCH 441/861] drm/amdgpu: fix sdma instance It should change logical instance to device instance to query ras info Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 570ea68c521f..bf47eb33c12e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -2123,6 +2123,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev, void *ras_err_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status; + uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst); /* sdma v4_4_2 doesn't support query ce counts */ amdgpu_ras_inst_query_ras_error_count(adev, @@ -2130,7 +2131,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev, ARRAY_SIZE(sdma_v4_2_2_ue_reg_list), sdma_v4_4_2_ras_memory_list, ARRAY_SIZE(sdma_v4_4_2_ras_memory_list), - sdma_inst, + sdma_dev_inst, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, &err_data->ue_count); } @@ -2153,10 +2154,12 @@ static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev, static void sdma_v4_4_2_inst_reset_ras_error_count(struct amdgpu_device *adev, uint32_t sdma_inst) { + uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst); + amdgpu_ras_inst_reset_ras_error_count(adev, sdma_v4_2_2_ue_reg_list, ARRAY_SIZE(sdma_v4_2_2_ue_reg_list), - sdma_inst); + sdma_dev_inst); } static void sdma_v4_4_2_reset_ras_error_count(struct amdgpu_device *adev) From 8d1c1bc13134ab90d773cb73c0298f2459703bee Mon Sep 17 00:00:00 2001 From: Asad kamal Date: Wed, 8 Feb 2023 20:19:13 +0800 
Subject: [PATCH 442/861] drm/amd/pm: Update pmfw header files for SMU v13.0.6 Update driver metrics table for SMU v13.0.6 to be compatible with PMFW v85.47 version Signed-off-by: Asad kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h | 4 ++-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h index be596777cd2c..370c6125d718 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h @@ -26,7 +26,7 @@ // *** IMPORTANT *** // PMFW TEAM: Always increment the interface version if // anything is changed in this file -#define SMU13_0_6_DRIVER_IF_VERSION 0x08042022 +#define SMU13_0_6_DRIVER_IF_VERSION 0x08042023 //I2C Interface #define NUM_I2C_CONTROLLERS 8 @@ -106,7 +106,7 @@ typedef enum { } UCLK_DPM_MODE_e; typedef struct { - //0-26 SOC, 27-29 SOCIO + //0-23 SOC, 24-26 SOCIO, 27-29 SOC uint16_t avgPsmCount[30]; uint16_t minPsmCount[30]; float avgPsmVoltage[30]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index bdccbb4a6276..3fe403615d86 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0x1 +#define SMU_METRICS_TABLE_VERSION 0x3 typedef struct { uint32_t AccumulationCounter; @@ -198,6 +198,10 @@ typedef struct { uint32_t SocketThmResidencyAcc; uint32_t VrThmResidencyAcc; uint32_t HbmThmResidencyAcc; + uint32_t spare; + + // New Items at end to maintain driver compatibility + uint32_t GfxclkFrequency[8]; } MetricsTable_t; #define SMU_VF_METRICS_TABLE_VERSION 0x1 From a1b0dafafca414cf8b3a51225215a21df2b3ddf8 Mon Sep 17 00:00:00 2001 From: Asad kamal Date: Wed, 8 Feb 2023 23:04:25 +0800 Subject: [PATCH 443/861] drm/amd/pm: Update gfx clock frequency for SMU v13.0.6 Update gfx clock frequency from metric table for SMU v13.0.6 Signed-off-by: Asad kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 8969b3ff5c8f..d0c49e8883e7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -660,7 +660,10 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, { struct smu_table_context *smu_table = &smu->smu_table; MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + struct amdgpu_device *adev = smu->adev; + uint32_t smu_version; int ret = 0; + int xcc_id; ret = smu_v13_0_6_get_metrics_table(smu, NULL, false); if (ret) @@ -670,7 +673,13 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, switch (member) { case METRICS_CURR_GFXCLK: case METRICS_AVERAGE_GFXCLK: - *value = 0; + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (smu_version >= 0x552F00) { + xcc_id = GET_INST(GC, 0); + *value = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc_id]); + } else { + *value = 0; + } 
break; case METRICS_CURR_SOCCLK: case METRICS_AVERAGE_SOCCLK: From 6d5f5eaf6acf26ce0dc986fe7240dc4a0c981119 Mon Sep 17 00:00:00 2001 From: Asad kamal Date: Mon, 13 Feb 2023 19:52:56 +0800 Subject: [PATCH 444/861] drm/amd/pm: Update pmfw header files for SMU v13.0.6 Update driver interface for SMU v13.0.6 to be compatible with PMFW v85.48 version Signed-off-by: Asad kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- .../pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h | 12 ++++++++++++ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 1 - 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h index 370c6125d718..8b7fa0fa59c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h @@ -121,6 +121,18 @@ typedef struct { float minPsmVoltage[30]; } AvfsDebugTableXcd_t; +// Defines used for IH-based thermal interrupts to GFX driver - A/X only +#define IH_INTERRUPT_ID_TO_DRIVER 0xFE +#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 + +//thermal over-temp mask defines +#define THROTTLER_TEMP_CCD_BIT 5 +#define THROTTLER_TEMP_XCD_BIT 6 +#define THROTTLER_TEMP_HBM_BIT 7 +#define THROTTLER_TEMP_AID_BIT 8 +#define THROTTLER_VRHOT_BIT 9 + + // These defines are used with the following messages: // SMC_MSG_TransferTableDram2Smu // SMC_MSG_TransferTableSmu2Dram diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index d0c49e8883e7..b08608caafd0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -175,7 +175,6 @@ static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = { #define THROTTLER_PPT_BIT 1 #define THROTTLER_TEMP_SOC_BIT 2 #define THROTTLER_TEMP_VR_GFX_BIT 3 -#define THROTTLER_TEMP_HBM_BIT 4 static const uint8_t smu_v13_0_6_throttler_map[] = { [THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT), From 676915e4108f3a8d98944e16e6ce00a6440ac701 Mon Sep 17 00:00:00 2001 From: Asad kamal Date: Wed, 15 Feb 2023 15:53:15 +0800 Subject: [PATCH 445/861] drm/amd/pm: Add ih for SMU v13.0.6 thermal throttling Add interrupt handler for thermal throttler events from PMFW on SMUv13.0.6 Signed-off-by: Asad kamal Acked-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 107 +++++++++++++++++- 1 file changed, 104 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index b08608caafd0..43a855de7e9e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1297,6 +1297,109 @@ static int smu_v13_0_6_set_power_limit(struct smu_context *smu, return smu_v13_0_set_power_limit(smu, limit_type, limit); } +static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t client_id = entry->client_id; + uint32_t src_id = entry->src_id; + /* + * ctxid is used to distinguish different + * events for SMCToHost interrupt + */ + uint32_t ctxid = entry->src_data[0]; + uint32_t data; + + if (client_id == SOC15_IH_CLIENTID_MP1) { + if (src_id == 
IH_INTERRUPT_ID_TO_DRIVER) { + /* ACK SMUToHost interrupt */ + data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data); + + switch (ctxid) { + case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING: + /* + * Increment the throttle interrupt counter + */ + atomic64_inc(&smu->throttle_int_counter); + + if (!atomic_read(&adev->throttling_logging_enabled)) + return 0; + + if (__ratelimit(&adev->throttling_logging_rs)) + schedule_work(&smu->throttling_logging_work); + + break; + } + } + } + + return 0; +} + +int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + uint32_t val = 0; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + /* For MP1 SW irqs */ + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + + break; + case AMDGPU_IRQ_STATE_ENABLE: + /* For MP1 SW irqs */ + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + + break; + default: + break; + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = +{ + .set = smu_v13_0_6_set_irq_state, + .process = smu_v13_0_6_irq_process, +}; + +int smu_v13_0_6_register_irq_handler(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + struct amdgpu_irq_src *irq_src = &smu->irq_source; + int ret = 0; + + if (amdgpu_sriov_vf(adev)) + return 0; + + irq_src->num_types = 1; + irq_src->funcs = &smu_v13_0_6_irq_funcs; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, + IH_INTERRUPT_ID_TO_DRIVER, + irq_src); + if (ret) + return ret; + + return ret; +} + static int smu_v13_0_6_system_features_control(struct smu_context *smu, bool enable) { @@ -2042,11 +2145,9 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .set_power_limit = smu_v13_0_6_set_power_limit, .set_xgmi_pstate = smu_v13_0_set_xgmi_pstate, - /* TODO: Thermal limits unknown, skip these for now - .register_irq_handler = smu_v13_0_register_irq_handler, + .register_irq_handler = smu_v13_0_6_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, .disable_thermal_alert = smu_v13_0_disable_thermal_alert, - */ .setup_pptable = smu_v13_0_6_setup_pptable, .baco_is_support = smu_v13_0_6_is_baco_supported, .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq, From 9661bf687623f628729566cc3c58207c44e56258 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 21 Feb 2023 14:47:51 +0530 Subject: [PATCH 446/861] drm/amd/pm: Keep interface version in PMFW header Use the interface version directly from the PMFW interface header file rather than keeping another definition in the common smu13 file.
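With the defines kept in the interface headers, each ppt setter fills in smu->smc_driver_if_version and the common code only compares it against what the firmware reports, roughly as in this simplified excerpt of smu_v13_0_check_fw_version() (message text abridged):

        ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
                return ret;

        if (if_version != smu->smc_driver_if_version)
                dev_info(adev->dev,
                         "driver if_version 0x%08x does not match fw if_version 0x%08x\n",
                         smu->smc_driver_if_version, if_version);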
Signed-off-by: Lijo Lazar Reviewed-by: Asad kamal Signed-off-by: Alex Deucher --- .../inc/pmfw_if/smu13_driver_if_aldebaran.h | 2 + .../inc/pmfw_if/smu13_driver_if_v13_0_0.h | 2 + .../inc/pmfw_if/smu13_driver_if_v13_0_4.h | 2 +- .../inc/pmfw_if/smu13_driver_if_v13_0_5.h | 2 +- .../inc/pmfw_if/smu13_driver_if_v13_0_7.h | 2 +- .../inc/pmfw_if/smu13_driver_if_yellow_carp.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 11 ------ .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 1 + .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 39 +------------------ .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 1 + .../drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 1 + .../drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 1 + .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 1 + .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1 + .../drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 1 + 15 files changed, 17 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h index 90200f31ff52..cddf45eebee8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h @@ -24,6 +24,8 @@ #ifndef SMU13_DRIVER_IF_ALDEBARAN_H #define SMU13_DRIVER_IF_ALDEBARAN_H +#define SMU13_DRIVER_IF_VERSION_ALDE 0x08 + #define NUM_VCLK_DPM_LEVELS 8 #define NUM_DCLK_DPM_LEVELS 8 #define NUM_SOCCLK_DPM_LEVELS 8 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h index b686fb68a6e7..fe995651c6f5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h @@ -24,6 +24,8 @@ #ifndef SMU13_DRIVER_IF_V13_0_0_H #define SMU13_DRIVER_IF_V13_0_0_H +#define SMU13_0_0_DRIVER_IF_VERSION 0x32 + //Increment this version if SkuTable_t or BoardTable_t change #define PPTABLE_VERSION 0x26 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h index 2162ecd1057d..fee9293b3f97 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define PMFW_DRIVER_IF_VERSION 8 +#define SMU13_0_4_DRIVER_IF_VERSION 8 typedef struct { int32_t value; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h index aa971412b434..7589faa0232d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h @@ -23,7 +23,7 @@ #ifndef __SMU13_DRIVER_IF_V13_0_5_H__ #define __SMU13_DRIVER_IF_V13_0_5_H__ -#define PMFW_DRIVER_IF_VERSION 4 +#define SMU13_0_5_DRIVER_IF_VERSION 4 // Throttler Status Bitmask #define THROTTLER_STATUS_BIT_SPL 0 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h index 4c46a0392451..44e879c51cae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h @@ -25,7 +25,7 @@ // *** IMPORTANT *** // 
PMFW TEAM: Always increment the interface version on any change to this file -#define SMU13_DRIVER_IF_VERSION 0x35 +#define SMU13_0_7_DRIVER_IF_VERSION 0x35 //Increment this version if SkuTable_t or BoardTable_t change #define PPTABLE_VERSION 0x27 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h index 25540cb28208..7417634827ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h @@ -26,7 +26,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU13_DRIVER_IF_VERSION 4 +#define SMU13_YELLOW_CARP_DRIVER_IF_VERSION 4 typedef struct { int32_t value; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index df3baaab0037..3ae8d5d252a3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -25,17 +25,6 @@ #include "amdgpu_smu.h" -#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF -#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 -#define SMU13_DRIVER_IF_VERSION_ALDE 0x08 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_6 0x0 - #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms /* MP Apertures */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index d30ec3005ea1..e80f122d8aec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -2147,5 +2147,6 @@ void aldebaran_set_ppt_funcs(struct smu_context *smu) smu->clock_map = aldebaran_clk_map; smu->feature_map = aldebaran_feature_mask_map; smu->table_map = aldebaran_table_map; + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; smu_v13_0_set_smu_mailbox_registers(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 393c6a7b9609..b24c8549d0b1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -269,45 +269,10 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_major = (smu_version >> 16) & 0xff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; - if (smu->is_apu) + if (smu->is_apu || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6)) adev->pm.fw_version = smu_version; - switch (adev->ip_versions[MP1_HWIP][0]) { - case IP_VERSION(13, 0, 2): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; - break; - case IP_VERSION(13, 0, 0): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0; - break; - case IP_VERSION(13, 0, 10): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10; - break; - case IP_VERSION(13, 0, 7): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_7; - break; - case IP_VERSION(13, 0, 1): - case IP_VERSION(13, 0, 3): - case IP_VERSION(13, 0, 8): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP; - break; - case IP_VERSION(13, 0, 4): - case IP_VERSION(13, 0, 
11): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_4; - break; - case IP_VERSION(13, 0, 5): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5; - break; - case IP_VERSION(13, 0, 6): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_6; - adev->pm.fw_version = smu_version; - break; - default: - dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", - adev->ip_versions[MP1_HWIP][0]); - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; - break; - } - /* only for dGPU w/ SMU13*/ if (adev->pm.fw) dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 09405ef1e3c8..f4783e685bf8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2199,5 +2199,6 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) smu->table_map = smu_v13_0_0_table_map; smu->pwr_src_map = smu_v13_0_0_pwr_src_map; smu->workload_map = smu_v13_0_0_workload_map; + smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION; smu_v13_0_0_set_smu_mailbox_registers(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 8fa9a36c38b6..0a0a7debb3ae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -1043,6 +1043,7 @@ void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu) smu->message_map = smu_v13_0_4_message_map; smu->feature_map = smu_v13_0_4_feature_mask_map; smu->table_map = smu_v13_0_4_table_map; + smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION; smu->is_apu = true; if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 66445964efbd..165b2470b017 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -1068,6 +1068,7 @@ void smu_v13_0_5_set_ppt_funcs(struct smu_context *smu) smu->feature_map = smu_v13_0_5_feature_mask_map; smu->table_map = smu_v13_0_5_table_map; smu->is_apu = true; + smu->smc_driver_if_version = SMU13_0_5_DRIVER_IF_VERSION; smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_34); smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_2); smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_33); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 43a855de7e9e..b46e0414be60 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2176,5 +2176,6 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) smu->clock_map = smu_v13_0_6_clk_map; smu->feature_map = smu_v13_0_6_feature_mask_map; smu->table_map = smu_v13_0_6_table_map; + smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; smu_v13_0_set_smu_mailbox_registers(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 98a33f8ee209..1b6116cf8b4c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1797,5 +1797,6 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) smu->table_map = smu_v13_0_7_table_map; 
smu->pwr_src_map = smu_v13_0_7_pwr_src_map; smu->workload_map = smu_v13_0_7_workload_map; + smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION; smu_v13_0_set_smu_mailbox_registers(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 04e56b0b3033..ac5fcca0e47f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1234,5 +1234,6 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu) smu->feature_map = yellow_carp_feature_mask_map; smu->table_map = yellow_carp_table_map; smu->is_apu = true; + smu->smc_driver_if_version = SMU13_YELLOW_CARP_DRIVER_IF_VERSION; smu_v13_0_set_smu_mailbox_registers(smu); } From 909ae7155faebc62af461924a91071c0b9cc4e39 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 27 Feb 2023 16:51:16 +0530 Subject: [PATCH 447/861] drm/amd/pm: Initialize power limit for SMU v13.0.6 PMFW will initialize the power limit values even if PPT throttler feature is disabled. Fetch the limit value from FW. Signed-off-by: Lijo Lazar Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index b46e0414be60..7474d3ffab93 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1256,21 +1256,6 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu, uint32_t power_limit = 0; int ret; - if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { - if (current_power_limit) - *current_power_limit = 0; - if (default_power_limit) - *default_power_limit = 0; - if (max_power_limit) - *max_power_limit = 0; - - dev_warn( - smu->adev->dev, - "PPT feature is not enabled, power values can't be fetched."); - - return 0; - } - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit); if (ret) { From 8572fa2aa517d2512abba661ddd5e9a44a893dc9 Mon Sep 17 00:00:00 2001 From: Asad Kamal Date: Fri, 3 Mar 2023 12:20:21 +0800 Subject: [PATCH 448/861] drm/amd/pm: Update hw mon attributes for GC version 9.4.3 Update hw mon attributes for GC Version 9.4.3 to valid ones on APU and Non APU systems v2: Group checks along existing one Added power limit & mclock for gc version 9.4.3 Signed-off-by: Asad Kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 36 +++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 58c2246918fd..385d83eb8706 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3362,7 +3362,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; /* Skip crit temp on APU */ - if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) && + if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) || + (gc_ver == IP_VERSION(9, 4, 3))) && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) return 0; @@ -3395,9 +3396,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ effective_mode &= ~S_IWUSR; - /* In the case of APUs, this is only 
implemented on Vangogh */ + /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */ if (((adev->family == AMDGPU_FAMILY_SI) || - ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)))) && + ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) && + (gc_ver != IP_VERSION(9, 4, 3)))) && (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr || attr == &sensor_dev_attr_power1_cap.dev_attr.attr || @@ -3426,25 +3428,39 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ - adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ + adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */ + (gc_ver == IP_VERSION(9, 4, 3))) && (attr == &sensor_dev_attr_in0_input.dev_attr.attr || attr == &sensor_dev_attr_in0_label.dev_attr.attr)) return 0; - /* only APUs have vddnb */ - if (!(adev->flags & AMD_IS_APU) && + /* only APUs other than gc 9,4,3 have vddnb */ + if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) && (attr == &sensor_dev_attr_in1_input.dev_attr.attr || attr == &sensor_dev_attr_in1_label.dev_attr.attr)) return 0; - /* no mclk on APUs */ - if ((adev->flags & AMD_IS_APU) && + /* no mclk on APUs other than gc 9,4,3*/ + if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) && (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) return 0; - /* only SOC15 dGPUs support hotspot and mem temperatures */ if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) && + (gc_ver != IP_VERSION(9, 4, 3)) && + (attr == &sensor_dev_attr_temp2_input.dev_attr.attr || + attr == &sensor_dev_attr_temp2_label.dev_attr.attr)) + return 0; + + /* Only hotspot temperature for gc 9,4,3*/ + if ((gc_ver == IP_VERSION(9, 4, 3)) && + (attr == &sensor_dev_attr_temp1_input.dev_attr.attr || + attr == &sensor_dev_attr_temp1_label.dev_attr.attr)) + return 0; + + /* only SOC15 dGPUs support hotspot and mem temperatures */ + if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) || + (gc_ver == IP_VERSION(9, 4, 3))) && (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || attr == &sensor_dev_attr_temp3_crit.dev_attr.attr || @@ -3452,9 +3468,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr || - attr == &sensor_dev_attr_temp2_input.dev_attr.attr || attr == &sensor_dev_attr_temp3_input.dev_attr.attr || - attr == &sensor_dev_attr_temp2_label.dev_attr.attr || attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) return 0; From bfb4fd20b3e6997876068f469c14d963b227d896 Mon Sep 17 00:00:00 2001 From: Asad Kamal Date: Wed, 8 Mar 2023 22:30:58 +0800 Subject: [PATCH 449/861] drm/amd/pm: Expose mem temperature for GC version 9.4.3 Add mem temperature as part of hw mon attributes for GC version 9.4.3 Signed-off-by: Asad Kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 385d83eb8706..40100b77b2d9 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3449,10 +3449,12 @@ 
static umode_t hwmon_attributes_visible(struct kobject *kobj, if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) && (gc_ver != IP_VERSION(9, 4, 3)) && (attr == &sensor_dev_attr_temp2_input.dev_attr.attr || - attr == &sensor_dev_attr_temp2_label.dev_attr.attr)) + attr == &sensor_dev_attr_temp2_label.dev_attr.attr || + attr == &sensor_dev_attr_temp3_input.dev_attr.attr || + attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) return 0; - /* Only hotspot temperature for gc 9,4,3*/ + /* hotspot temperature for gc 9,4,3*/ if ((gc_ver == IP_VERSION(9, 4, 3)) && (attr == &sensor_dev_attr_temp1_input.dev_attr.attr || attr == &sensor_dev_attr_temp1_label.dev_attr.attr)) @@ -3467,9 +3469,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || - attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr || - attr == &sensor_dev_attr_temp3_input.dev_attr.attr || - attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) + attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)) return 0; /* only Vangogh has fast PPT limit and power labels */ From fe9e5f56feb287b3f14b0a5892061a1da2b89b5b Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 10 Mar 2023 18:11:25 +0530 Subject: [PATCH 450/861] drm/amd/pm: Update PMFW headers for version 85.54 It adds message support for FW notification on driver unload. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- .../inc/pmfw_if/smu13_driver_if_v13_0_6.h | 18 ------------------ .../pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h | 3 ++- 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h index 8b7fa0fa59c3..de84fff39799 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h @@ -132,22 +132,4 @@ typedef struct { #define THROTTLER_TEMP_AID_BIT 8 #define THROTTLER_VRHOT_BIT 9 - -// These defines are used with the following messages: -// SMC_MSG_TransferTableDram2Smu -// SMC_MSG_TransferTableSmu2Dram -// #define TABLE_PPTABLE 0 -// #define TABLE_AVFS_PSM_DEBUG 1 -// #define TABLE_AVFS_FUSE_OVERRIDE 2 -// #define TABLE_PMSTATUSLOG 3 -// #define TABLE_SMU_METRICS 4 -// #define TABLE_DRIVER_SMU_CONFIG 5 -// #define TABLE_I2C_COMMANDS 6 -// #define TABLE_COUNT 7 - -// // Table transfer status -// #define TABLE_TRANSFER_OK 0x0 -// #define TABLE_TRANSFER_FAILED 0xFF -// #define TABLE_TRANSFER_PENDING 0xAB - #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h index b838e8db395a..ae4f44c4b877 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h @@ -82,7 +82,8 @@ #define PPSMC_MSG_SetSoftMaxGfxClk 0x31 #define PPSMC_MSG_GetMinGfxDpmFreq 0x32 #define PPSMC_MSG_GetMaxGfxDpmFreq 0x33 -#define PPSMC_Message_Count 0x34 +#define PPSMC_MSG_PrepareForDriverUnload 0x34 +#define PPSMC_Message_Count 0x35 //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 From 7214c08c168046aadf15e3d731ee673f26c77213 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 10 Mar 2023 11:33:37 +0530 Subject: [PATCH 
451/861] drm/amd/pm: Notify PMFW about driver unload cases On SMU v13.0.6 APUs, FW will need to take some actions if driver is going to halt RLC. Notify PMFW that driver is not going to manage device so that FW takes care of the required actions. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 25 +++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 7474d3ffab93..6dcafd04c98d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -122,6 +122,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 0), MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0), MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0), }; static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = { @@ -1385,14 +1386,34 @@ int smu_v13_0_6_register_irq_handler(struct smu_context *smu) return ret; } +static int smu_v13_0_6_notify_unload(struct smu_context *smu) +{ + uint32_t smu_version; + + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (smu_version <= 0x553500) + return 0; + + dev_dbg(smu->adev->dev, "Notify PMFW about driver unload"); + /* Ignore return, just intimate FW that driver is not going to be there */ + smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); + + return 0; +} + static int smu_v13_0_6_system_features_control(struct smu_context *smu, bool enable) { + struct amdgpu_device *adev = smu->adev; int ret; - /* Nothing to be done for APU */ - if (smu->adev->flags & AMD_IS_APU) + /* On APUs, notify FW that the device is no longer driver managed */ + if (adev->flags & AMD_IS_APU) { + if (!enable) + smu_v13_0_6_notify_unload(smu); + return 0; + } ret = smu_v13_0_system_features_control(smu, enable); From 8f2ccaaa373815ff94223dc2e3f6d53ff2f3ecb3 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 9 Mar 2023 13:04:56 +0530 Subject: [PATCH 452/861] drm/amdgpu: Add mode-2 reset in SMU v13.0.6 Modifications to mode-2 reset flow for SMU v13.0.6 ASICs. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 9 +++++++++ .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 16 +++++++--------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 9fbfd0811d06..082c1e9308d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -556,6 +556,15 @@ soc15_asic_reset_method(struct amdgpu_device *adev) if (connected_to_cpu) return AMD_RESET_METHOD_MODE2; break; + case IP_VERSION(13, 0, 6): + /* Use gpu_recovery param to target a reset method. + * Enable triggering of GPU reset only if specified + * by module parameter. 
+ */ + if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5) + return AMD_RESET_METHOD_MODE2; + else + return AMD_RESET_METHOD_NONE; default: break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 6dcafd04c98d..4b808c0addc2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2024,27 +2024,27 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table static int smu_v13_0_6_mode2_reset(struct smu_context *smu) { - u32 smu_version; int ret = 0, index; struct amdgpu_device *adev = smu->adev; int timeout = 10; - smu_cmn_get_smc_version(smu, NULL, &smu_version); - index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GfxDeviceDriverReset); mutex_lock(&smu->message_lock); + ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2); + /* This is similar to FLR, wait till max FLR timeout */ msleep(100); + dev_dbg(smu->adev->dev, "restore config space...\n"); /* Restore the config space saved during init */ amdgpu_device_load_pci_state(adev->pdev); dev_dbg(smu->adev->dev, "wait for reset ack\n"); - while (ret == -ETIME && timeout) { + do { ret = smu_cmn_wait_for_response(smu); /* Wait a bit more time for getting ACK */ if (ret == -ETIME) { @@ -2053,16 +2053,14 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu) continue; } - if (ret != 1) { + if (ret) { dev_err(adev->dev, - "failed to send mode2 message \tparam: 0x%08x response %#x\n", + "failed to send mode2 message \tparam: 0x%08x error code %d\n", SMU_RESET_MODE_2, ret); goto out; } - } + } while (ret == -ETIME && timeout); - if (ret == 1) - ret = 0; out: mutex_unlock(&smu->message_lock); From 463e953ea2eda25fbde70e0e72900f5bafeff93c Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 27 Apr 2023 10:36:51 +0800 Subject: [PATCH 453/861] drm/amd/pm: fix wrong smu socclk value fix typo about smu socclk value. Signed-off-by: Yang Wang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 4b808c0addc2..a712b2bf2d25 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -387,7 +387,7 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu, break; case SMU_SOCCLK: if (pptable->Init) - clock_limit = pptable->UclkFrequencyTable[0]; + clock_limit = pptable->SocclkFrequencyTable[0]; break; case SMU_FCLK: if (pptable->Init) From ded7d99eb5b78931cec30dd49cd4097d0ac770e1 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 16 Jan 2023 10:55:38 +0530 Subject: [PATCH 454/861] drm/amdgpu: Add flags for partition mode query It's not required to take lock on all cases while querying partition mode. Querying partition mode during KFD init process doesn't need to take a lock. Init process after a switch will already be happening under lock. Control the behaviour by adding flags to xcp_query_partition_mode. 
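The flag only describes lock ownership; a sketch of the two intended call patterns:

        /* Normal path: the function takes xcp_lock itself. */
        mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_NONE);

        /* Caller already holds xcp_mgr->xcp_lock, e.g. when re-querying
         * during a partition switch, so skip the lock to avoid
         * self-deadlock.
         */
        mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_LOCKED);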
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 8 +++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 5 ++++- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4 +++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 70c6099353b8..1487ecac2705 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1177,7 +1177,8 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, int mode; char *partition_mode; - mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr); + mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, + AMDGPU_XCP_FL_NONE); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index f59bc450cabe..5b999e5334bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -170,7 +170,7 @@ out: return ret; } -int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) { int mode; @@ -180,7 +180,8 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) return xcp_mgr->mode; - mutex_lock(&xcp_mgr->xcp_lock); + if (!(flags & AMDGPU_XCP_FL_LOCKED)) + mutex_lock(&xcp_mgr->xcp_lock); mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr); if (mode != xcp_mgr->mode) dev_WARN( @@ -188,7 +189,8 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) "Cached partition mode %d not matching with device mode %d", xcp_mgr->mode, mode); - mutex_unlock(&xcp_mgr->xcp_lock); + if (!(flags & AMDGPU_XCP_FL_LOCKED)) + mutex_unlock(&xcp_mgr->xcp_lock); return mode; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index f0b973c6092f..9fa6f0ea2061 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -30,6 +30,9 @@ #define AMDGPU_XCP_MODE_NONE -1 +#define AMDGPU_XCP_FL_NONE 0 +#define AMDGPU_XCP_FL_LOCKED (1 << 0) + enum AMDGPU_XCP_IP_BLOCK { AMDGPU_XCP_GFXHUB, AMDGPU_XCP_GFX, @@ -99,7 +102,7 @@ int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs); -int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr); +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode); int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, enum AMDGPU_XCP_IP_BLOCK ip, int instance); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 42877c4505f1..69867294117e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1940,7 +1940,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { int r, i, num_xcc; - if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, + AMDGPU_XCP_FL_NONE) == + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) 
amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode); num_xcc = NUM_XCC(adev->gfx.xcc_mask); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index d7cffd91f1d7..4293cbf9ceb0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -645,7 +645,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * If the VMID range changes for GFX9.4.3, then this code MUST be * revisited. */ - partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr); + partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr, AMDGPU_XCP_FL_LOCKED); if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { From 46d79cbf9ac64a5e63f0c85f256ba6400a1f2024 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 20 Jan 2023 15:53:47 +0530 Subject: [PATCH 455/861] drm/amdgpu: Use transient mode during xcp switch During partition switch, keep the state as transient mode. Fetch the latest state if the switch fails. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 18 +++++++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 1 + 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 5b999e5334bb..e8aa4d6c6b62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -139,7 +139,7 @@ static int __amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps) int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode) { - int ret, num_xcps = 0; + int ret, curr_mode, num_xcps = 0; if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE) return -EINVAL; @@ -152,10 +152,22 @@ int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode) mutex_lock(&xcp_mgr->xcp_lock); + curr_mode = xcp_mgr->mode; + /* State set to transient mode */ + xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS; + ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps); - if (ret) + if (ret) { + /* Failed, get whatever mode it's at now */ + if (xcp_mgr->funcs->query_partition_mode) + xcp_mgr->mode = amdgpu_xcp_query_partition_mode( + xcp_mgr, AMDGPU_XCP_FL_LOCKED); + else + xcp_mgr->mode = curr_mode; + goto out; + } if (!num_xcps || num_xcps > MAX_XCP) { ret = -EINVAL; @@ -183,7 +195,7 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) if (!(flags & AMDGPU_XCP_FL_LOCKED)) mutex_lock(&xcp_mgr->xcp_lock); mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr); - if (mode != xcp_mgr->mode) + if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode) dev_WARN( xcp_mgr->adev->dev, "Cached partition mode %d not matching with device mode %d", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 9fa6f0ea2061..1d3dc7d68f54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -29,6 +29,7 @@ #define MAX_XCP 8 #define AMDGPU_XCP_MODE_NONE -1 +#define AMDGPU_XCP_MODE_TRANS -2 #define AMDGPU_XCP_FL_NONE 0 #define AMDGPU_XCP_FL_LOCKED (1 << 0) From 01ef47477d05b784ab6ac26fa6878987eda436f1 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 2 Feb 2023 15:13:12 +0530 Subject: [PATCH 456/861] drm/amdgpu: Add FGCG for GFX v9.4.3 It's not fine-grained; it behaves similarly to MGCG.
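The new mask bits only advertise support; the per-IP clock-gating paths are expected to check them before touching the corresponding registers, along the lines of the sketch below (update_gfx_fgcg() is a hypothetical stand-in, not a function added here):

        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)
                update_gfx_fgcg(adev, state == AMD_CG_STATE_GATE);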
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 082c1e9308d0..122ba1a505c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1156,8 +1156,9 @@ static int soc15_common_early_init(void *handle) adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_IH_CG | AMD_CG_SUPPORT_VCN_MGCG | - AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_REPEATER_FGCG | + AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_IH_CG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | From f4d8b6f5c61ab5e98258bd0072d733741c76bd8d Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 31 Jan 2023 11:23:50 -0500 Subject: [PATCH 457/861] drm/amdkfd: Enable SVM on Native mode This patch enables SVM capability on GFX9.4.3 when run in Native mode. It also sets best_prefetch and best_restore locations to CPU as there is no VRAM. Signed-off-by: Mukul Joshi Acked-by: Rajneesh Bhardwaj Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++++++++ drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 2b79849ddd30..cf354f9e4285 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2543,6 +2543,9 @@ svm_range_best_restore_location(struct svm_range *prange, return -1; } + if (node->adev->gmc.is_app_apu) + return 0; + if (prange->preferred_loc == gpuid || prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; @@ -3256,6 +3259,11 @@ svm_range_best_prefetch_location(struct svm_range *prange) goto out; } + if (bo_node->adev->gmc.is_app_apu) { + best_loc = 0; + goto out; + } + if (p->xnack_enabled) bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 5116786718b6..7515ddade3ae 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -200,7 +200,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s /* SVM API and HMM page migration work together, device memory type * is initialized to not 0 when page migration register device memory. */ -#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0) +#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0 ||\ + (dev)->adev->gmc.is_app_apu) void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); From 4d5275ab0b18d17697392aafd93e206e6b9de647 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 27 Jan 2023 18:18:17 +0530 Subject: [PATCH 458/861] drm/amdgpu: Add parsing of acpi xcc objects Add parsing of ACPI xcc objects and fill in relevant info from them by invoking the DSM methods. 
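
For reference, each property is fetched with a typed _DSM evaluation per
function index; a minimal sketch of the pattern (the helper name is
illustrative, the ACPI API is the kernel's own):

	/* Illustrative helper: evaluate a _DSM function that returns a
	 * single integer and copy the value out.
	 */
	static int read_dsm_u64(acpi_handle handle, const guid_t *guid,
				u64 func, u64 *out)
	{
		union acpi_object *obj;

		obj = acpi_evaluate_dsm_typed(handle, guid, 0, func, NULL,
					      ACPI_TYPE_INTEGER);
		if (!obj)
			return -ENOENT;

		*out = obj->integer.value;
		ACPI_FREE(obj);
		return 0;
	}
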
Signed-off-by: Lijo Lazar Reviewed-and-tested-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 294 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 + 3 files changed, 297 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 39743d44b567..880bf9d67284 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1404,11 +1404,13 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev); void amdgpu_acpi_detect(void); +void amdgpu_acpi_release(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } +static inline void amdgpu_acpi_release(void) { } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, u8 dev_state, bool drv_state) { return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index aeeec211861c..a3a2ef43abfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -38,6 +38,43 @@ #include "amd_acpi.h" #include "atom.h" +/* Declare GUID for AMD _DSM method for XCCs */ +static const guid_t amd_xcc_dsm_guid = GUID_INIT(0x8267f5d5, 0xa556, 0x44f2, + 0xb8, 0xb4, 0x45, 0x56, 0x2e, + 0x8c, 0x5b, 0xec); + +#define AMD_XCC_HID_START 3000 +#define AMD_XCC_DSM_GET_NUM_FUNCS 0 +#define AMD_XCC_DSM_GET_SUPP_MODE 1 +#define AMD_XCC_DSM_GET_XCP_MODE 2 +#define AMD_XCC_DSM_GET_VF_XCC_MAPPING 4 +#define AMD_XCC_DSM_GET_TMR_INFO 5 +#define AMD_XCC_DSM_NUM_FUNCS 5 + +#define AMD_XCC_MAX_HID 24 + +/* Encapsulates the XCD acpi object information */ +struct amdgpu_acpi_xcc_info { + struct list_head list; + int mem_node; + uint8_t xcp_node; + uint8_t phy_id; + acpi_handle handle; +}; + +struct amdgpu_acpi_dev_info { + struct list_head list; + struct list_head xcc_list; + uint16_t bdf; + uint16_t supp_xcp_mode; + uint16_t xcp_mode; + uint16_t mem_mode; + uint64_t tmr_base; + uint64_t tmr_size; +}; + +struct list_head amdgpu_acpi_dev_list; + struct amdgpu_atif_notification_cfg { bool enabled; int command_code; @@ -801,6 +838,240 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta return r; } +/** + * amdgpu_acpi_get_node_id - obtain the NUMA node id for corresponding amdgpu + * acpi device handle + * + * @handle: acpi handle + * @nid: NUMA Node id returned by the platform firmware + * + * Queries the ACPI interface to fetch the corresponding NUMA Node ID for a + * given amdgpu acpi device. 
+ * + * Returns ACPI STATUS OK with Node ID on success or the corresponding failure reason + */ +acpi_status amdgpu_acpi_get_node_id(acpi_handle handle, int *nid) +{ +#ifdef CONFIG_ACPI_NUMA + u64 pxm; + acpi_status status; + + status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); + + if (ACPI_FAILURE(status)) + return status; + + *nid = pxm_to_node(pxm); + + return_ACPI_STATUS(AE_OK); +#else + return_ACPI_STATUS(AE_NOT_EXIST); +#endif +} + +struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf) +{ + struct amdgpu_acpi_dev_info *acpi_dev; + + if (list_empty(&amdgpu_acpi_dev_list)) + return NULL; + + list_for_each_entry(acpi_dev, &amdgpu_acpi_dev_list, list) + if (acpi_dev->bdf == bdf) + return acpi_dev; + + return NULL; +} + +static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info, + struct amdgpu_acpi_xcc_info *xcc_info, u16 bdf) +{ + struct amdgpu_acpi_dev_info *tmp; + union acpi_object *obj; + int ret = -ENOENT; + + *dev_info = NULL; + tmp = kzalloc(sizeof(struct amdgpu_acpi_dev_info), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + INIT_LIST_HEAD(&tmp->xcc_list); + INIT_LIST_HEAD(&tmp->list); + tmp->bdf = bdf; + + obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0, + AMD_XCC_DSM_GET_SUPP_MODE, NULL, + ACPI_TYPE_INTEGER); + + if (!obj) { + acpi_handle_debug(xcc_info->handle, + "_DSM function %d evaluation failed", + AMD_XCC_DSM_GET_SUPP_MODE); + ret = -ENOENT; + goto out; + } + + tmp->supp_xcp_mode = obj->integer.value & 0xFFFF; + ACPI_FREE(obj); + + obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0, + AMD_XCC_DSM_GET_XCP_MODE, NULL, + ACPI_TYPE_INTEGER); + + if (!obj) { + acpi_handle_debug(xcc_info->handle, + "_DSM function %d evaluation failed", + AMD_XCC_DSM_GET_XCP_MODE); + ret = -ENOENT; + goto out; + } + + tmp->xcp_mode = obj->integer.value & 0xFFFF; + tmp->mem_mode = (obj->integer.value >> 32) & 0xFFFF; + ACPI_FREE(obj); + + /* Evaluate DSMs and fill XCC information */ + obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0, + AMD_XCC_DSM_GET_TMR_INFO, NULL, + ACPI_TYPE_PACKAGE); + + if (!obj || obj->package.count < 2) { + acpi_handle_debug(xcc_info->handle, + "_DSM function %d evaluation failed", + AMD_XCC_DSM_GET_TMR_INFO); + ret = -ENOENT; + goto out; + } + + tmp->tmr_base = obj->package.elements[0].integer.value; + tmp->tmr_size = obj->package.elements[1].integer.value; + ACPI_FREE(obj); + + DRM_DEBUG_DRIVER( + "New dev(%x): Supported xcp mode: %x curr xcp_mode : %x mem mode : %x, tmr base: %llx tmr size: %llx ", + tmp->bdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode, + tmp->tmr_base, tmp->tmr_size); + list_add_tail(&tmp->list, &amdgpu_acpi_dev_list); + *dev_info = tmp; + + return 0; + +out: + if (obj) + ACPI_FREE(obj); + kfree(tmp); + + return ret; +} + +static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info, + u16 *bdf) +{ + union acpi_object *obj; + acpi_status status; + int ret = -ENOENT; + + obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0, + AMD_XCC_DSM_GET_NUM_FUNCS, NULL, + ACPI_TYPE_INTEGER); + + if (!obj || obj->integer.value != AMD_XCC_DSM_NUM_FUNCS) + goto out; + ACPI_FREE(obj); + + /* Evaluate DSMs and fill XCC information */ + obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0, + AMD_XCC_DSM_GET_VF_XCC_MAPPING, NULL, + ACPI_TYPE_INTEGER); + + if (!obj) { + acpi_handle_debug(xcc_info->handle, + "_DSM function %d evaluation failed", + AMD_XCC_DSM_GET_VF_XCC_MAPPING); + ret = -EINVAL; + goto out; + } + + /* PF xcc id [39:32] */ + 
xcc_info->phy_id = (obj->integer.value >> 32) & 0xFF; + /* xcp node of this xcc [47:40] */ + xcc_info->xcp_node = (obj->integer.value >> 40) & 0xFF; + /* PF bus/dev/fn of this xcc [63:48] */ + *bdf = (obj->integer.value >> 48) & 0xFFFF; + ACPI_FREE(obj); + obj = NULL; + + status = amdgpu_acpi_get_node_id(xcc_info->handle, &xcc_info->mem_node); + + /* TODO: check if this check is required */ + if (ACPI_SUCCESS(status)) + ret = 0; +out: + if (obj) + ACPI_FREE(obj); + + return ret; +} + +static int amdgpu_acpi_enumerate_xcc(void) +{ + struct amdgpu_acpi_dev_info *dev_info = NULL; + struct amdgpu_acpi_xcc_info *xcc_info; + struct acpi_device *acpi_dev; + char hid[ACPI_ID_LEN]; + int ret, id; + u16 bdf; + + INIT_LIST_HEAD(&amdgpu_acpi_dev_list); + + for (id = 0; id < AMD_XCC_MAX_HID; id++) { + sprintf(hid, "%s%d", "AMD", AMD_XCC_HID_START + id); + acpi_dev = acpi_dev_get_first_match_dev(hid, NULL, -1); + /* These ACPI objects are expected to be in sequential order. If + * one is not found, no need to check the rest. + */ + if (!acpi_dev) { + DRM_DEBUG_DRIVER("No matching acpi device found for %s", + hid); + break; + } + + xcc_info = kzalloc(sizeof(struct amdgpu_acpi_xcc_info), + GFP_KERNEL); + if (!xcc_info) { + DRM_ERROR("Failed to allocate memory for xcc info\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&xcc_info->list); + xcc_info->handle = acpi_device_handle(acpi_dev); + acpi_dev_put(acpi_dev); + + ret = amdgpu_acpi_get_xcc_info(xcc_info, &bdf); + if (ret) { + kfree(xcc_info); + continue; + } + + dev_info = amdgpu_acpi_get_dev(bdf); + + if (!dev_info) + ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, bdf); + + if (ret == -ENOMEM) + return ret; + + if (!dev_info) { + kfree(xcc_info); + continue; + } + + list_add_tail(&xcc_info->list, &dev_info->xcc_list); + } + + return 0; +} + /** * amdgpu_acpi_event - handle notify events * @@ -1054,6 +1325,29 @@ void amdgpu_acpi_detect(void) } else { atif->backlight_caps.caps_valid = false; } + + amdgpu_acpi_enumerate_xcc(); +} + +void amdgpu_acpi_release(void) +{ + struct amdgpu_acpi_dev_info *dev_info, *dev_tmp; + struct amdgpu_acpi_xcc_info *xcc_info, *xcc_tmp; + + if (list_empty(&amdgpu_acpi_dev_list)) + return; + + list_for_each_entry_safe(dev_info, dev_tmp, &amdgpu_acpi_dev_list, + list) { + list_for_each_entry_safe(xcc_info, xcc_tmp, &dev_info->xcc_list, + list) { + list_del(&xcc_info->list); + kfree(xcc_info); + } + + list_del(&dev_info->list); + kfree(dev_info); + } } #if IS_ENABLED(CONFIG_SUSPEND) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9e9da2ac5c82..f319318a8813 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2898,6 +2898,7 @@ static void __exit amdgpu_exit(void) amdgpu_amdkfd_fini(); pci_unregister_driver(&amdgpu_kms_pci_driver); amdgpu_unregister_atpx_handler(); + amdgpu_acpi_release(); amdgpu_sync_fini(); amdgpu_fence_slab_fini(); mmu_notifier_synchronize(); From 6e01882267a696b022cfe3473a0d3e5ccbe54010 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 27 Jan 2023 18:40:14 +0530 Subject: [PATCH 459/861] drm/amdgpu: Add API to get tmr info from acpi In certain configs, TMR information is available from ACPI. Add API to fetch the information. 
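
Intended usage is a simple query against the cached ACPI info; a later
patch in this series does essentially this:

	u64 tmr_offset, tmr_size;
	int ret;

	/* Fails with -ENOENT when no ACPI TMR info exists for this device */
	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;
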
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 20 ++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 880bf9d67284..f4461bc8b1fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1400,6 +1400,8 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, u8 dev_state, bool drv_state); int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state); int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); +int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, + u64 *tmr_size); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev); @@ -1407,6 +1409,11 @@ void amdgpu_acpi_detect(void); void amdgpu_acpi_release(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } +static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, + u64 *tmr_offset, u64 *tmr_size) +{ + return -EINVAL; +} static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index a3a2ef43abfc..9dbdd699dcea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1072,6 +1072,26 @@ static int amdgpu_acpi_enumerate_xcc(void) return 0; } +int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, + u64 *tmr_size) +{ + struct amdgpu_acpi_dev_info *dev_info; + u16 bdf; + + if (!tmr_offset || !tmr_size) + return -EINVAL; + + bdf = (adev->pdev->bus->number << 8) | adev->pdev->devfn; + dev_info = amdgpu_acpi_get_dev(bdf); + if (!dev_info) + return -ENOENT; + + *tmr_offset = dev_info->tmr_base; + *tmr_size = dev_info->tmr_size; + + return 0; +} + /** * amdgpu_acpi_event - handle notify events * From 368bb1bcfb3a3bc70793cd347abe0bc60c01d94b Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 30 Jan 2023 09:38:09 +0530 Subject: [PATCH 460/861] drm/amdgpu: Read discovery info from system memory On certain ASICs, discovery info is available at reserved region in system memory. The location is available through ACPI interface. Add API to read discovery info from there. 
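
The discovery binary sits at a fixed offset from the end of the TMR, so
the read reduces to a memremap() of the reserved range; roughly (here
discovery_size stands in for adev->mman.discovery_tmr_size):

	u64 pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
	void *regn;

	/* The region is reserved from system use; a WC mapping suffices */
	regn = memremap(pos, discovery_size, MEMREMAP_WC);
	if (!regn)
		return -ENOENT;
	memcpy(binary, regn, discovery_size);
	memunmap(regn);
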
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 6701f17a4db6..246070938c41 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -203,6 +203,29 @@ static int hw_id_map[MAX_HWIP] = { [PCIE_HWIP] = PCIE_HWID, }; +static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary) +{ + u64 tmr_offset, tmr_size, pos; + void *discv_regn; + int ret; + + ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size); + if (ret) + return ret; + + pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET; + + /* This region is read-only and reserved from system use */ + discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC); + if (discv_regn) { + memcpy(binary, discv_regn, adev->mman.discovery_tmr_size); + memunmap(discv_regn); + return 0; + } + + return -ENOENT; +} + static void amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) { uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; From ac772a3c07cad66df46b2781f39121be211d383e Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 30 Jan 2023 09:48:39 +0530 Subject: [PATCH 461/861] drm/amdgpu: Add fallback path for discovery info If SOC doesn't expose dedicated vram, discovery region may be available through system memory. Rename the existing interface to generic read_binary_from_mem and add a fallback path to read from system memory. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 246070938c41..01b21988c1ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -226,13 +226,21 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, return -ENOENT; } -static void amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) +static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, + uint8_t *binary) { uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; - uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; + int ret = 0; - amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, - adev->mman.discovery_tmr_size, false); + if (vram_size) { + uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; + amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, + adev->mman.discovery_tmr_size, false); + } else { + ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary); + } + + return ret; } static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) @@ -338,7 +346,10 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) } } else { - amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); + r = amdgpu_discovery_read_binary_from_mem( + adev, adev->mman.discovery_bin); + if (r) + goto out; } /* check the ip discovery binary signature */ From 75dda67c7213c3e0d17244a8c42547c27ee746f8 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 9 Feb 2023 18:23:16 -0500 Subject: [PATCH 462/861] drm/amdkfd: Flush TLB 
after unmapping for GFX v9.4.3

kfd_flush_tlb_after_unmap should return true for GFX v9.4.3, to do a
heavyweight TLB flush after unmapping from the GPU and guarantee that
the GPU will not access pages after they have been unmapped. This also
helps improve mapping-to-GPU performance. Without this, KFD accidentally
flushes the TLB after mapping to the GPU, because the vm update sequence
number is increased by the previous unmapping.

Signed-off-by: Philip Yang
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index fb3cf2c51da8..6e1c15682c28 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1405,9 +1405,9 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

 static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
 {
-	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
-	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
-		dev->adev->sdma.instance[0].fw_version >= 18) ||
+	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
+	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
 }

From f2b8447b1f309901c3fdd4045febfe5cab545d87 Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Fri, 10 Feb 2023 12:05:59 +0530
Subject: [PATCH 463/861] drm/amdgpu: Fix discovery sys node harvest info

Initialize sysfs nodes after harvest information is fetched, and fetch
the correct harvest info based on each IP instance.

Signed-off-by: Lijo Lazar
Reviewed-by: Le Ma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 41 +++++++++++++++++--
 1 file changed, 38 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 01b21988c1ae..d81b2e1e8aee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -877,6 +877,36 @@ static void ip_disc_release(struct kobject *kobj)
	kfree(ip_top);
 }

+static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
+						 uint16_t hw_id, uint8_t inst)
+{
+	uint8_t harvest = 0;
+
+	/* Until a uniform way is figured, get mask based on hwid */
+	switch (hw_id) {
+	case VCN_HWID:
+		harvest = (1 << inst) & adev->vcn.harvest_config;
+		break;
+	case DMU_HWID:
+		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
+			harvest = 0x1;
+		break;
+	case UMC_HWID:
+		/* TODO: It needs another parsing; for now, ignore.*/
+		break;
+	case GC_HWID:
+		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
+		break;
+	case SDMA0_HWID:
+		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
+		break;
+	default:
+		break;
+	}
+
+	return harvest;
+}
+
 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
@@ -949,7 +979,10 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
-			ip_hw_instance->harvest = ip->variant;
+			ip_hw_instance->harvest =
+				amdgpu_discovery_get_harvest_info(
+					adev, ip_hw_instance->hw_id,
+					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
@@
-1035,6 +1068,9 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
	struct kset *die_kset;
	int res, ii;

+	if (!adev->mman.discovery_bin)
+		return -EINVAL;
+
	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;
@@ -1282,8 +1318,6 @@ next_ip:
		}
	}

-	amdgpu_discovery_sysfs_init(adev);
-
	return 0;
 }

@@ -2224,6 +2258,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
	}

	amdgpu_discovery_init_soc_config(adev);
+	amdgpu_discovery_sysfs_init(adev);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):

From 5ae0ec8b8045d72467d4e7417b34a5ab2fa72138 Mon Sep 17 00:00:00 2001
From: Shiwu Zhang
Date: Mon, 13 Feb 2023 15:45:42 +0800
Subject: [PATCH 464/861] drm/amdgpu: preserve the num_links in case of reflection

For topology reflection, the topology info from each socket to every
other socket is exactly the same as in the reverse direction. So it is
safe to keep the reflected num_links value; otherwise it would be
overridden by the link info output of the GET_PEER_LINKS command.

Signed-off-by: Shiwu Zhang
Reviewed-by: Le Ma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4395c53d09d8..ea47012795e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1402,7 +1402,8 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
			topology->nodes[i].num_links = get_extended_data ?
					topology->nodes[i].num_links +
							link_info_output->nodes[i].num_links :
-					link_info_output->nodes[i].num_links;
+					((requires_reflection && topology->nodes[i].num_links) ? topology->nodes[i].num_links :
+					link_info_output->nodes[i].num_links);

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)

From 48d19834ea551af2932090ff6de04730007e9876 Mon Sep 17 00:00:00 2001
From: Sonny Jiang
Date: Wed, 15 Feb 2023 13:56:57 -0500
Subject: [PATCH 465/861] drm/amdgpu: Load vcn_v4_0_3 ucode during early_init

VCN ucode loading is moved to early_init, using the 'amdgpu_ucode_*'
helpers.

Reviewed-by: Leo Liu
Signed-off-by: Sonny Jiang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 746df23b2eaa..308dfe80a87c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -69,7 +69,7 @@ static int vcn_v4_0_3_early_init(void *handle)
	vcn_v4_0_3_set_unified_ring_funcs(adev);
	vcn_v4_0_3_set_irq_funcs(adev);

-	return 0;
+	return amdgpu_vcn_early_init(adev);
 }

 /**

From aaf1090a6cb66e8b6d9da63ee983604a7eca8ffd Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Fri, 17 Feb 2023 18:27:51 +0530
Subject: [PATCH 466/861] drm/amdgpu: Add instance mask for VCN and JPEG

Keep an instance mask formed by physical instance numbers for VCN and
JPEG IPs. Populate the mask from discovery table information.
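
The mask is a plain bitmap over physical instance numbers, so populating
it, applying harvesting and counting usable instances are simple bit
operations; schematically (harvested_instance and num_usable are
illustrative names):

	inst_mask |= 1U << ip->instance_number;     /* per discovery entry */
	inst_mask &= ~(1U << harvested_instance);   /* per harvest entry */
	num_usable = hweight32(inst_mask);          /* usable instances */
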
Signed-off-by: Lijo Lazar Acked-by: Leo Liu Tested-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 30 ++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1 + 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index d81b2e1e8aee..ae5852f80549 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -549,10 +549,19 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, switch (le16_to_cpu(ip->hw_id)) { case VCN_HWID: (*vcn_harvest_count)++; - if (ip->instance_number == 0) + if (ip->instance_number == 0) { adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; - else + adev->vcn.inst_mask &= + ~AMDGPU_VCN_HARVEST_VCN0; + adev->jpeg.inst_mask &= + ~AMDGPU_VCN_HARVEST_VCN0; + } else { adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + adev->vcn.inst_mask &= + ~AMDGPU_VCN_HARVEST_VCN1; + adev->jpeg.inst_mask &= + ~AMDGPU_VCN_HARVEST_VCN1; + } break; case DMU_HWID: adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; @@ -601,6 +610,11 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, (1 << harvest_info->list[i].number_instance); adev->jpeg.harvest_config |= (1 << harvest_info->list[i].number_instance); + + adev->vcn.inst_mask &= + ~(1U << harvest_info->list[i].number_instance); + adev->jpeg.inst_mask &= + ~(1U << harvest_info->list[i].number_instance); break; case DMU_HWID: adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; @@ -1188,6 +1202,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) adev->gfx.xcc_mask = 0; adev->sdma.sdma_mask = 0; + adev->vcn.inst_mask = 0; + adev->jpeg.inst_mask = 0; bhdr = (struct binary_header *)adev->mman.discovery_bin; ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); @@ -1235,12 +1251,18 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = ip->revision & 0xc0; ip->revision &= ~0xc0; - if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES) + if (adev->vcn.num_vcn_inst < + AMDGPU_MAX_VCN_INSTANCES) { adev->vcn.num_vcn_inst++; - else + adev->vcn.inst_mask |= + (1U << ip->instance_number); + adev->jpeg.inst_mask |= + (1U << ip->instance_number); + } else { dev_err(adev->dev, "Too many VCN instances: %d vs %d\n", adev->vcn.num_vcn_inst + 1, AMDGPU_MAX_VCN_INSTANCES); + } } if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || le16_to_cpu(ip->hw_id) == SDMA1_HWID || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index bb700a2b97c2..90516f623f56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -59,6 +59,8 @@ struct amdgpu_jpeg { atomic_t total_submission_cnt; struct ras_common_if *ras_if; struct amdgpu_jpeg_ras *ras; + + uint16_t inst_mask; uint8_t num_inst_per_aid; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 6ea3f076257e..47463ef10fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -273,6 +273,7 @@ struct amdgpu_vcn { struct ras_common_if *ras_if; struct amdgpu_vcn_ras *ras; + uint16_t inst_mask; uint8_t num_inst_per_aid; }; From 07bc0ac8ff49c9868a66526634fbc21cb194afca Mon Sep 17 
00:00:00 2001 From: Lijo Lazar Date: Fri, 17 Feb 2023 18:44:20 +0530 Subject: [PATCH 467/861] drm/amdgpu: Add VCN logical to physical id mapping Add mappings for logical to physical id for VCN/JPEG 4.0.3 v2: make local function static (Alex) Signed-off-by: Lijo Lazar Acked-by: Leo Liu Tested-by: James Zhu Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index fdc728f678d7..90fe77db9bee 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -82,6 +82,21 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, return dev_inst; } +static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, + enum amd_hw_ip_block_type ip_block, + uint32_t inst_mask) +{ + int l = 0, i; + + while (inst_mask) { + i = ffs(inst_mask) - 1; + adev->ip_map.dev_inst[ip_block][l++] = i; + inst_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[ip_block][l] = -1; +} + void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) { int xcc_mask, sdma_mask; @@ -108,6 +123,9 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) for (; l < HWIP_MAX_INSTANCE; l++) adev->ip_map.dev_inst[SDMA0_HWIP][l] = -1; + /* This covers both VCN and JPEG, JPEG is only alias of VCN */ + aqua_vanjaram_populate_ip_map(adev, VCN_HWIP, adev->vcn.inst_mask); + adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; } From fd91d38b5275959a5b0804d4b4dbc5a4c0a8aac9 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 17 Feb 2023 19:41:06 +0530 Subject: [PATCH 468/861] drm/amdgpu: Use logical ids for VCN/JPEG v4.0.3 Address VCN/JPEG instances using logical ids. Whenever register access is required, get the physical instance using GET_INST. 
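
GET_INST resolves through a per-IP table built from the instance mask;
the mapping loop from the previous patch boils down to (comments added
for illustration):

	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;   /* lowest set bit = next physical id */
		dev_inst[block][l++] = i; /* logical l maps to physical i */
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		dev_inst[block][l] = -1;  /* unused logical slots */
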
Signed-off-by: Lijo Lazar Acked-by: Leo Liu Tested-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 29 +- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 14 +- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 247 +++++----- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 429 ++++++++++-------- 4 files changed, 405 insertions(+), 314 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 47463ef10fce..1eb9ccd1d83d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -141,18 +141,23 @@ RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \ }) -#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \ - do { \ - if (!indirect) { \ - WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \ - WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \ - (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ - mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ - offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ - } else { \ - *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \ - *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \ - } \ +#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \ + do { \ + if (!indirect) { \ + WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \ + mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15( \ + VCN, GET_INST(VCN, inst_idx), \ + mmUVD_DPG_LMA_CTL, \ + (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + } else { \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \ + offset; \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \ + value; \ + } \ } while (0) #define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 90fe77db9bee..51d3cb81e37a 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -70,6 +70,8 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, switch (block) { case GC_HWIP: case SDMA0_HWIP: + /* Both JPEG and VCN as JPEG is only alias of VCN */ + case VCN_HWIP: dev_inst = adev->ip_map.dev_inst[block][inst]; break; default: @@ -379,7 +381,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { u32 mask, inst_mask = adev->sdma.sdma_mask; - int ret, i, num_inst; + int ret, i; /* generally 1 AID supports 4 instances */ adev->sdma.num_inst_per_aid = 4; @@ -394,11 +396,15 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) adev->aid_mask |= (1 << i); } - num_inst = hweight32(adev->aid_mask); + /* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be + * addressed based on logical instance ids. 
+ */ + adev->vcn.harvest_config = 0; adev->vcn.num_inst_per_aid = 1; - adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * num_inst; + adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask); + adev->jpeg.harvest_config = 0; adev->jpeg.num_inst_per_aid = 1; - adev->jpeg.num_jpeg_inst = adev->jpeg.num_inst_per_aid * num_inst; + adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask); ret = aqua_vanjaram_xcp_mgr_init(adev); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index aa14a6619e9a..c0e90e27f24b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -82,7 +82,7 @@ static int jpeg_v4_0_3_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, j, r; + int i, j, r, jpeg_inst; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { /* JPEG TRAP */ @@ -101,14 +101,15 @@ static int jpeg_v4_0_3_sw_init(void *handle) return r; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; + jpeg_inst = GET_INST(JPEG, i); + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; ring->use_doorbell = true; ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id); ring->doorbell_index = - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + j + 9 * i; + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 1 + j + 9 * jpeg_inst; sprintf(ring->name, "jpeg_dec_%d.%d", i, j); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -118,8 +119,10 @@ static int jpeg_v4_0_3_sw_init(void *handle) adev->jpeg.internal.jpeg_pitch[j] = regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET; adev->jpeg.inst[i].external.jpeg_pitch[j] = - SOC15_REG_OFFSET1(JPEG, i, regUVD_JRBC0_UVD_JRBC_SCRATCH0, - (j?(0x40 * j - 0xc80):0)); + SOC15_REG_OFFSET1( + JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_SCRATCH0, + (j ? (0x40 * j - 0xc80) : 0)); } } @@ -157,25 +160,30 @@ static int jpeg_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, j, r; + int i, j, r, jpeg_inst; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; + jpeg_inst = GET_INST(JPEG, i); + ring = adev->jpeg.inst[i].ring_dec; if (ring->use_doorbell) - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i, + adev->nbio.funcs->vcn_doorbell_range( + adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * jpeg_inst, adev->jpeg.inst[i].aid_id); for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; if (ring->use_doorbell) - WREG32_SOC15_OFFSET(VCN, i, regVCN_JPEG_DB_CTRL, - (ring->pipe?(ring->pipe - 0x15):0), - ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | - VCN_JPEG_DB_CTRL__EN_MASK); + WREG32_SOC15_OFFSET( + VCN, GET_INST(VCN, i), + regVCN_JPEG_DB_CTRL, + (ring->pipe ? 
(ring->pipe - 0x15) : 0), + ring->doorbell_index + << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); r = amdgpu_ring_test_helper(ring); if (r) return r; @@ -250,10 +258,11 @@ static int jpeg_v4_0_3_resume(void *handle) static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx) { + int i, jpeg_inst; uint32_t data; - int i; - data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL); + jpeg_inst = GET_INST(JPEG, inst_idx); + data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) { data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data &= (~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1)); @@ -263,21 +272,22 @@ static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int ins data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL, data); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data); - data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE); + data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE); data &= ~(JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK << i); - WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE, data); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data); } static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx) { + int i, jpeg_inst; uint32_t data; - int i; - data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL); + jpeg_inst = GET_INST(JPEG, inst_idx); + data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) { data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= (JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1); @@ -287,13 +297,13 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL, data); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data); - data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE); + data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE); data |= (JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK << i); - WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE, data); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data); } /** @@ -306,34 +316,36 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst static int jpeg_v4_0_3_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int i, j; + int i, j, jpeg_inst; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; - WREG32_SOC15(JPEG, i, regUVD_PGFSM_CONFIG, - 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); - SOC15_WAIT_ON_RREG(JPEG, i, regUVD_PGFSM_STATUS, - UVD_PGFSM_STATUS__UVDJ_PWR_ON << - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, + jpeg_inst = GET_INST(JPEG, i); + + WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG, + 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG( + JPEG, jpeg_inst, regUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDJ_PWR_ON + << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); /* disable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_POWER_STATUS), 0, - 
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, + regUVD_JPEG_POWER_STATUS), + 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); /* JPEG disable CGC */ jpeg_v4_0_3_disable_clock_gating(adev, i); /* MJPEG global tiling registers */ - WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); @@ -341,25 +353,40 @@ static int jpeg_v4_0_3_start(struct amdgpu_device *adev) ring = &adev->jpeg.inst[i].ring_dec[j]; /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC0_MASK << j, - ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j)); + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, + regJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC0_MASK << j, + ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j)); - WREG32_SOC15_OFFSET(JPEG, i, - regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, reg_offset, 0); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, - (0x00000001L | 0x00000002L)); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, + reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_CNTL, + reg_offset, + (0x00000001L | 0x00000002L)); + WREG32_SOC15_OFFSET( + JPEG, jpeg_inst, + regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, reg_offset, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + WREG32_SOC15_OFFSET( + JPEG, jpeg_inst, + regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, reg_offset, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_RPTR, reg_offset, 0); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_WPTR, reg_offset, 0); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, - 0x00000002L); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_SIZE, reg_offset, - ring->ring_size / 4); - ring->wptr = RREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_WPTR, + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_RPTR, + reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_WPTR, + reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_CNTL, + reg_offset, 0x00000002L); + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_SIZE, + reg_offset, ring->ring_size / 4); + ring->wptr = RREG32_SOC15_OFFSET( + JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR, reg_offset); } } @@ -376,29 +403,29 @@ static int jpeg_v4_0_3_start(struct amdgpu_device *adev) */ static int jpeg_v4_0_3_stop(struct amdgpu_device *adev) { - int i; + int i, jpeg_inst; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; - + jpeg_inst = GET_INST(JPEG, i); /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - 
~UVD_JMI_CNTL__SOFT_RESET_MASK); + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); jpeg_v4_0_3_enable_clock_gating(adev, i); /* enable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_POWER_STATUS), - UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, + regUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - WREG32_SOC15(JPEG, i, regUVD_PGFSM_CONFIG, - 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); - SOC15_WAIT_ON_RREG(JPEG, i, regUVD_PGFSM_STATUS, - UVD_PGFSM_STATUS__UVDJ_PWR_OFF << - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, + WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG, + 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG( + JPEG, jpeg_inst, regUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDJ_PWR_OFF + << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); } @@ -416,8 +443,9 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_RPTR, - ring->pipe?(0x40 * ring->pipe - 0xc80):0); + return RREG32_SOC15_OFFSET( + JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR, + ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0); } /** @@ -434,8 +462,10 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_WPTR, - ring->pipe?(0x40 * ring->pipe - 0xc80):0); + return RREG32_SOC15_OFFSET( + JPEG, GET_INST(JPEG, ring->me), + regUVD_JRBC0_UVD_JRBC_RB_WPTR, + ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0); } /** @@ -453,8 +483,11 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring) adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_WPTR, - (ring->pipe?(0x40 * ring->pipe - 0xc80):0), lower_32_bits(ring->wptr)); + WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), + regUVD_JRBC0_UVD_JRBC_RB_WPTR, + (ring->pipe ? 
(0x40 * ring->pipe - 0xc80) : + 0), + lower_32_bits(ring->wptr)); } } @@ -703,15 +736,15 @@ static bool jpeg_v4_0_3_is_idle(void *handle) int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); - ret &= ((RREG32_SOC15_OFFSET(JPEG, i, - regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset) & - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + ret &= ((RREG32_SOC15_OFFSET( + JPEG, GET_INST(JPEG, i), + regUVD_JRBC0_UVD_JRBC_STATUS, + reg_offset) & + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); } } @@ -725,12 +758,11 @@ static int jpeg_v4_0_3_wait_for_idle(void *handle) int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); - ret &= SOC15_WAIT_ON_RREG_OFFSET(JPEG, i, + ret &= SOC15_WAIT_ON_RREG_OFFSET( + JPEG, GET_INST(JPEG, i), regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset, UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK, UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); @@ -747,8 +779,6 @@ static int jpeg_v4_0_3_set_clockgating_state(void *handle, int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; if (enable) { if (!jpeg_v4_0_3_is_idle(handle)) return -EBUSY; @@ -792,35 +822,46 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t i; + uint32_t i, inst; i = node_id_to_phys_map[entry->node_id]; DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n"); + for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst) + if (adev->jpeg.inst[inst].aid_id == i) + break; + + if (inst >= adev->jpeg.num_jpeg_inst) { + dev_WARN_ONCE(adev->dev, 1, + "Interrupt received for unknown JPEG instance %d", + entry->node_id); + return 0; + } + switch (entry->src_id) { case VCN_4_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[0]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]); break; case VCN_4_0__SRCID__JPEG1_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[1]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]); break; case VCN_4_0__SRCID__JPEG2_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[2]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]); break; case VCN_4_0__SRCID__JPEG3_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[3]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]); break; case VCN_4_0__SRCID__JPEG4_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[4]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]); break; case VCN_4_0__SRCID__JPEG5_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[5]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]); break; case VCN_4_0__SRCID__JPEG6_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[6]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]); break; case VCN_4_0__SRCID__JPEG7_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[7]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", @@ -882,17 +923,17 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { static void 
jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) { - int i, j; + int i, j, jpeg_inst; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; adev->jpeg.inst[i].ring_dec[j].me = i; adev->jpeg.inst[i].ring_dec[j].pipe = j; } - adev->jpeg.inst[i].aid_id = i / adev->jpeg.num_inst_per_aid; + jpeg_inst = GET_INST(JPEG, i); + adev->jpeg.inst[i].aid_id = + jpeg_inst / adev->jpeg.num_inst_per_aid; } DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); } @@ -907,8 +948,6 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings; } adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 308dfe80a87c..49b07843efd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -83,7 +83,7 @@ static int vcn_v4_0_3_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, r; + int i, r, vcn_inst; r = amdgpu_vcn_sw_init(adev); if (r) @@ -104,12 +104,13 @@ static int vcn_v4_0_3_sw_init(void *handle) for (i = 0; i < adev->vcn.num_vcn_inst; i++) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; - if (adev->vcn.harvest_config & (1 << i)) - continue; + vcn_inst = GET_INST(VCN, i); ring = &adev->vcn.inst[i].ring_enc[0]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i; + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * vcn_inst; ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); sprintf(ring->name, "vcn_unified_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -148,8 +149,6 @@ static int vcn_v4_0_3_sw_fini(void *handle) for (i = 0; i < adev->vcn.num_vcn_inst; i++) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; - if (adev->vcn.harvest_config & (1 << i)) - continue; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; fw_shared->sq.is_enabled = cpu_to_le32(false); @@ -177,21 +176,25 @@ static int vcn_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, r; + int i, r, vcn_inst; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; + vcn_inst = GET_INST(VCN, i); ring = &adev->vcn.inst[i].ring_enc[0]; if (ring->use_doorbell) { - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i, - adev->vcn.inst[i].aid_id); + adev->nbio.funcs->vcn_doorbell_range( + adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * vcn_inst, + adev->vcn.inst[i].aid_id); - WREG32_SOC15(VCN, ring->me, regVCN_RB1_DB_CTRL, - ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | - VCN_RB1_DB_CTRL__EN_MASK); + WREG32_SOC15( + VCN, GET_INST(VCN, ring->me), + regVCN_RB1_DB_CTRL, + ring->doorbell_index + << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); } r = amdgpu_ring_test_helper(ring); @@ -278,54 +281,67 @@ static int vcn_v4_0_3_resume(void *handle) */ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, 
int inst_idx) { - uint32_t offset, size; + uint32_t offset, size, vcn_inst; const struct common_firmware_header *hdr; hdr = (const struct common_firmware_header *)adev->vcn.fw->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + vcn_inst = GET_INST(VCN, inst_idx); /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi)); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0, 0); + WREG32_SOC15( + VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx] + .tmr_mc_addr_lo)); + WREG32_SOC15( + VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx] + .tmr_mc_addr_hi)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0); offset = 0; } else { - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); offset = size; - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); } - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0, size); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size); /* cache window 1: stack */ - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, + AMDGPU_VCN_STACK_SIZE); /* cache window 2: context */ - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, 
vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, + AMDGPU_VCN_CONTEXT_SIZE); /* non-cache window */ - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + WREG32_SOC15( + VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + WREG32_SOC15( + VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0, + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15( + VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); } @@ -454,18 +470,21 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx) { uint32_t data; + int vcn_inst; if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) return; + vcn_inst = GET_INST(VCN, inst_idx); + /* VCN disable CGC */ - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL); data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE); data &= ~(UVD_CGC_GATE__SYS_MASK | UVD_CGC_GATE__MPEG2_MASK | UVD_CGC_GATE__REGS_MASK @@ -479,10 +498,10 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst | UVD_CGC_GATE__VCPU_MASK | UVD_CGC_GATE__MMSCH_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE, data); - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_CGC_GATE, 0, 0xFFFFFFFF); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF); - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL); data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK | UVD_CGC_CTRL__MPEG2_MODE_MASK | UVD_CGC_CTRL__REGS_MODE_MASK @@ -495,9 +514,9 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst | UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__MMSCH_MODE_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE); data |= (UVD_SUVD_CGC_GATE__SRE_MASK | UVD_SUVD_CGC_GATE__SIT_MASK | UVD_SUVD_CGC_GATE__SMP_MASK @@ -519,9 +538,9 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK | UVD_SUVD_CGC_GATE__SDB_VP9_MASK | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL); data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK @@ -530,7 +549,7 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst | 
UVD_SUVD_CGC_CTRL__ENT_MODE_MASK | UVD_SUVD_CGC_CTRL__IME_MODE_MASK | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data); } /** @@ -595,18 +614,21 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx) { uint32_t data; + int vcn_inst; if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) return; + vcn_inst = GET_INST(VCN, inst_idx); + /* enable VCN CGC */ - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL); data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL); data |= (UVD_CGC_CTRL__SYS_MODE_MASK | UVD_CGC_CTRL__MPEG2_MODE_MASK | UVD_CGC_CTRL__REGS_MODE_MASK @@ -618,9 +640,9 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_ | UVD_CGC_CTRL__LRBBM_MODE_MASK | UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL); data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK @@ -629,7 +651,7 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK | UVD_SUVD_CGC_CTRL__IME_MODE_MASK | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data); } /** @@ -646,16 +668,18 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; + int vcn_inst; uint32_t tmp; + vcn_inst = GET_INST(VCN, inst_idx); /* disable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1, - ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* enable dynamic power gating mode */ - tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS); tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; - WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp); if (indirect) adev->vcn.inst[inst_idx].dpg_sram_curr_addr = @@ -737,27 +761,28 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b ring = &adev->vcn.inst[inst_idx].ring_enc[0]; /* program the RB_BASE for ring buffer */ - WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, - upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, + upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, inst_idx, 
regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, + ring->ring_size / sizeof(uint32_t)); /* resetting ring, fw should not check RB ring */ - tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK); - WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0); - ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); + ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); - tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp |= VCN_RB_ENABLE__RB_EN_MASK; - WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); /*resetting done, fw can check RB ring */ @@ -777,99 +802,101 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; + int i, j, k, r, vcn_inst; uint32_t tmp; - int i, j, k, r; if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, true); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram); continue; } + vcn_inst = GET_INST(VCN, i); /* set VCN status busy */ - tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; - WREG32_SOC15(VCN, i, regUVD_STATUS, tmp); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | + UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp); /*SW clock gating */ vcn_v4_0_3_disable_clock_gating(adev, i); /* enable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, + ~UVD_VCPU_CNTL__CLK_EN_MASK); /* disable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0, - ~UVD_MASTINT_EN__VCPU_EN_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); /* enable LMI MC and UMC channels */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; - WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); /* setup regUVD_LMI_CTRL */ - tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL); - WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__MASK_MC_URGENT_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, + tmp | 
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); /* setup regUVD_MPC_CNTL */ - tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL); tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; - WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp); /* setup UVD_MPC_SET_MUXA0 */ - WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0, - ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | - (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | - (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); /* setup UVD_MPC_SET_MUXB0 */ - WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0, - ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | - (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | - (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); + WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); /* setup UVD_MPC_SET_MUX */ - WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX, - ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | - (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); vcn_v4_0_3_mc_resume(adev, i); /* VCN global tiling registers */ - WREG32_SOC15(VCN, i, regUVD_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); + WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); /* unblock VCPU register access */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0, - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); /* release VCPU reset to boot */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); for (j = 0; j < 10; ++j) { uint32_t status; for (k = 0; k < 100; ++k) { - status = RREG32_SOC15(VCN, i, regUVD_STATUS); + status = RREG32_SOC15(VCN, vcn_inst, + regUVD_STATUS); if (status & 2) break; mdelay(10); @@ -880,12 +907,14 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) DRM_DEV_ERROR(adev->dev, "VCN decode not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__BLK_RST_MASK, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, + regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); mdelay(10); - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, + regUVD_VCPU_CNTL), + 0, 
~UVD_VCPU_CNTL__BLK_RST_MASK); mdelay(10); r = -1; @@ -897,39 +926,40 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) } /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), - UVD_MASTINT_EN__VCPU_EN_MASK, - ~UVD_MASTINT_EN__VCPU_EN_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); /* clear the busy bit of VCN_STATUS */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0, - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); ring = &adev->vcn.inst[i].ring_enc[0]; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; /* program the RB_BASE for ring buffer */ - WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, - upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, + upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, + ring->ring_size / sizeof(uint32_t)); /* resetting ring, fw should not check RB ring */ - tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK); - WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0); - WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); - tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp |= VCN_RB_ENABLE__RB_EN_MASK; - WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); - ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR); + ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); fw_shared->sq.queue_mode &= cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF)); @@ -948,21 +978,24 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { uint32_t tmp; + int vcn_inst; + + vcn_inst = GET_INST(VCN, inst_idx); /* Wait for power status to be 1 */ - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* wait for read ptr to be equal to write ptr */ - tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* disable dynamic power gating mode */ - WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0, - ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); return 0; } @@ -976,12 +1009,11 @@ static int vcn_v4_0_3_stop_dpg_mode(struct 
amdgpu_device *adev, int inst_idx) static int vcn_v4_0_3_stop(struct amdgpu_device *adev) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; + int i, r = 0, vcn_inst; uint32_t tmp; - int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; + vcn_inst = GET_INST(VCN, i); fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; @@ -992,7 +1024,8 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev) } /* wait for vcn idle */ - r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, + UVD_STATUS__IDLE, 0x7); if (r) goto Done; @@ -1000,45 +1033,47 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev) UVD_LMI_STATUS__READ_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; - r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, + tmp); if (r) goto Done; /* stall UMC channel */ - tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2); tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; - WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp); tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; - r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, + tmp); if (r) goto Done; /* Unblock VCPU Register access */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), - UVD_RB_ARB_CTRL__VCPU_DIS_MASK, - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); /* release VCPU reset to boot */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__BLK_RST_MASK, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); /* disable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, - ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); /* reset LMI UMC/LMI/VCPU */ - tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; - WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); - tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; - WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); /* clear VCN status */ - WREG32_SOC15(VCN, i, regUVD_STATUS, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0); /* apply HW clock gating */ vcn_v4_0_3_enable_clock_gating(adev, i); @@ -1080,7 +1115,7 @@ static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring) if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) DRM_ERROR("wrong ring id is identified in %s", __func__); - return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR); + return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR); } /** @@ -1100,7 +1135,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return *ring->wptr_cpu_addr; 
else - return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR); + return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), + regUVD_RB_WPTR); } /** @@ -1121,7 +1157,8 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring) *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR, + lower_32_bits(ring->wptr)); } } @@ -1163,14 +1200,14 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = { */ static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev) { - int i; + int i, vcn_inst; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs; adev->vcn.inst[i].ring_enc[0].me = i; - adev->vcn.inst[i].aid_id = i / adev->vcn.num_inst_per_aid; + vcn_inst = GET_INST(VCN, i); + adev->vcn.inst[i].aid_id = + vcn_inst / adev->vcn.num_inst_per_aid; } DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n"); } @@ -1188,9 +1225,8 @@ static bool vcn_v4_0_3_is_idle(void *handle) int i, ret = 1; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE); + ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == + UVD_STATUS__IDLE); } return ret; @@ -1209,10 +1245,8 @@ static int vcn_v4_0_3_wait_for_idle(void *handle) int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, - UVD_STATUS__IDLE); + ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, + UVD_STATUS__IDLE, UVD_STATUS__IDLE); if (ret) return ret; } @@ -1235,10 +1269,9 @@ static int vcn_v4_0_3_set_clockgating_state(void *handle, int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; if (enable) { - if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE) + if (RREG32_SOC15(VCN, GET_INST(VCN, i), + regUVD_STATUS) != UVD_STATUS__IDLE) return -EBUSY; vcn_v4_0_3_enable_clock_gating(adev, i); } else { @@ -1307,15 +1340,26 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t i; + uint32_t i, inst; i = node_id_to_phys_map[entry->node_id]; DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n"); + for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst) + if (adev->vcn.inst[inst].aid_id == i) + break; + + if (inst >= adev->vcn.num_vcn_inst) { + dev_WARN_ONCE(adev->dev, 1, + "Interrupt received for unknown VCN instance %d", + entry->node_id); + return 0; + } + switch (entry->src_id) { case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: - amdgpu_fence_process(&adev->vcn.inst[i].ring_enc[0]); + amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", @@ -1343,9 +1387,6 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - adev->vcn.inst->irq.num_types++; } adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; From 6a944ccbf5f5059de1a9b3d48971a50cb5857ebf Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 27 Feb 2023 16:07:39 +0530 
Subject: [PATCH 469/861] drm/amdgpu: Fix harvest reporting of VCN

Use the VCN instance mask to check whether an instance is harvested.

Signed-off-by: Lijo Lazar
Reviewed-by: James Zhu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index ae5852f80549..caae6bf2fd30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -313,6 +313,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
 case 0xCF:
 case 0xDF:
 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
 break;
 default:
 break;
@@ -899,7 +900,7 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
 /* Until a uniform way is figured, get mask based on hwid */
 switch (hw_id) {
 case VCN_HWID:
- harvest = (1 << inst) & adev->vcn.harvest_config;
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
 break;
 case DMU_HWID:
 if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)

From d3e53452b0f4cdd210432a268cabdbf65e98ddab Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Wed, 15 Mar 2023 03:54:55 -0400
Subject: [PATCH 470/861] drm/amdgpu/vcn: use dummy register to select AID for VCN_RAM ucode

Use the dummy register 0xDEADBEEF to select the AID for the PSP
VCN_RAM ucode.

Signed-off-by: James Zhu
Reviewed-by: Sonny Jiang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 49b07843efd1..759f64a4acf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -681,9 +681,15 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
 tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
 WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

- if (indirect)
+ if (indirect) {
+ DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
+ inst_idx, adev->vcn.inst[inst_idx].aid_id);
 adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
 (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+ /* Use dummy register 0xDEADBEEF to pass AID selection to PSP FW */
+ WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
+ adev->vcn.inst[inst_idx].aid_id, 0, true);
+ }

 /* enable clock gating */
 vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

From 358e6c38300b7d2b7d7122d4fe485d8a4580dc1e Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Wed, 15 Mar 2023 04:09:25 -0400
Subject: [PATCH 471/861] drm/amdgpu: use physical AID index for ring name

Use the physical AID index for the VCN/JPEG ring name instead of the
logical AID index.
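The GET_INST() translation used throughout these patches, logical
instance index in, physical device instance out, can be modeled in a
few lines. A minimal stand-alone sketch, assuming an ffs()-style walk
over the instance mask as in aqua_vanjaram_ip_map_init(); the names
dev_inst and get_inst are illustrative, not the kernel definitions:

#include <stdio.h>

#define HWIP_MAX_INSTANCE 8

/* dev_inst[logical] = physical instance, or -1 if unmapped */
static int dev_inst[HWIP_MAX_INSTANCE];

static int get_inst(int logical)
{
	return dev_inst[logical];	/* GET_INST()-like lookup */
}

int main(void)
{
	unsigned int inst_mask = 0xA;	/* physical instances 1 and 3 present */
	int l, i;

	for (l = 0; l < HWIP_MAX_INSTANCE; ++l)
		dev_inst[l] = -1;
	/* assign logical ids 0, 1, ... to the set bits of the mask */
	for (l = 0; inst_mask; ++l) {
		i = __builtin_ffs(inst_mask) - 1;
		dev_inst[l] = i;
		inst_mask &= ~(1u << i);
	}

	/* logical 0 -> physical 1, logical 1 -> physical 3 */
	printf("GET_INST(0) = %d, GET_INST(1) = %d\n",
	       get_inst(0), get_inst(1));
	return 0;
}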
Signed-off-by: James Zhu
Reviewed-by: Sonny Jiang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index c0e90e27f24b..ea9cb098a144 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -110,7 +110,7 @@ static int jpeg_v4_0_3_sw_init(void *handle)
 ring->doorbell_index =
 (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
 1 + j + 9 * jpeg_inst;
- sprintf(ring->name, "jpeg_dec_%d.%d", i, j);
+ sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
 r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
 AMDGPU_RING_PRIO_DEFAULT, NULL);
 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 759f64a4acf4..b0e28d611f2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -112,7 +112,7 @@ static int vcn_v4_0_3_sw_init(void *handle)
 (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
 9 * vcn_inst;
 ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
- sprintf(ring->name, "vcn_unified_%d", i);
+ sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
 AMDGPU_RING_PRIO_DEFAULT,
 &adev->vcn.inst[i].sched_score);

From 26dc0448ef36ba83be43a7c4da94d55ec626db1a Mon Sep 17 00:00:00 2001
From: Sonny Jiang
Date: Mon, 20 Mar 2023 16:17:21 -0400
Subject: [PATCH 472/861] drm/amdgpu: A workaround for JPEG_v4_0_3 ring test fail

The jpeg_v4_0_3 jpeg_pitch register uses UVD_JRBC_SCRATCH0, so the
WREG() needs to move to after jpeg_start. Switch to a posted register
write when doing the ring test to make sure the register write lands
before we test the result.

Signed-off-by: Sonny Jiang
Reviewed-by: Leo Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 388466a5f730..9a1db2bd03e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -124,11 +124,14 @@ int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
 if (amdgpu_sriov_vf(adev))
 return 0;

- WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe], 0xCAFEDEAD);
 r = amdgpu_ring_alloc(ring, 3);
 if (r)
 return r;

+ WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe], 0xCAFEDEAD);
+ /* Add a register read to make sure the register write is executed. */
+ RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
+
 amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0));
 amdgpu_ring_write(ring, 0xDEADBEEF);
 amdgpu_ring_commit(ring);

From 55ff23d9eb814dce8393a4c471259ded5a85d461 Mon Sep 17 00:00:00 2001
From: Sonny Jiang
Date: Wed, 22 Mar 2023 15:49:29 -0400
Subject: [PATCH 473/861] drm/amdgpu: fix a JPEG get write/read pointer bug

The macro parameters need parentheses.
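The bug class is easy to reproduce outside the kernel. A small sketch
with hypothetical OFFSET macros (not the SOC15 ones): when a caller
passes an expression as an argument, the unparenthesized version is
broken by operator precedence:

#include <stdio.h>

#define BAD_OFFSET(base, reg, offset)  (base + reg + offset)
#define GOOD_OFFSET(base, reg, offset) ((base) + (reg) + (offset))

int main(void)
{
	int base = 100, sel = 1;

	/* BAD expands to (base + sel ? 4 : 8 + 2): '+' binds tighter
	 * than '?:', so it evaluates (base + sel) ? 4 : 10, i.e. 4 */
	printf("bad:  %d\n", BAD_OFFSET(base, sel ? 4 : 8, 2));
	/* GOOD yields the intended 100 + 4 + 2 = 106 */
	printf("good: %d\n", GOOD_OFFSET(base, sel ? 4 : 8, 2));
	return 0;
}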
Signed-off-by: Sonny Jiang
Reviewed-by: David (Ming Qiang) Wu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/soc15_common.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 1c9e924b5f8c..3730c5ec202f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -71,7 +71,8 @@
 AMDGPU_REGS_NO_KIQ, ip##_HWIP)

 #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
- __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, 0, ip##_HWIP)
+ __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)) + \
+ (offset), 0, ip##_HWIP)

 #define WREG32_SOC15(ip, inst, reg, value) \
 __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \

From 96e693ad78a0778efb5256684a2f3712298f3e18 Mon Sep 17 00:00:00 2001
From: Sonny Jiang
Date: Tue, 28 Mar 2023 13:19:11 -0400
Subject: [PATCH 474/861] drm/amdgpu: Add a read after write DB_CTRL for vcn_v4_0_3

Make sure the VCN DB_CTRL write is delivered before the doorbell
write.

Signed-off-by: Sonny Jiang
Reviewed-by: Lijo Lazar
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index b0e28d611f2d..9d0c3dc76547 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -195,6 +195,11 @@ static int vcn_v4_0_3_hw_init(void *handle)
 ring->doorbell_index
 << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
 VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the DB_CTRL write. */
+ RREG32_SOC15(
+ VCN, GET_INST(VCN, ring->me),
+ regVCN_RB1_DB_CTRL);
 }

 r = amdgpu_ring_test_helper(ring);

From e7947c021a8b394677ab875288565d4dfcac779a Mon Sep 17 00:00:00 2001
From: Sonny Jiang
Date: Fri, 24 Mar 2023 17:44:59 -0400
Subject: [PATCH 475/861] drm/amdgpu: Use a different value than 0xDEADBEEF for jpeg ring test

0xDEADBEEF is the standard anti-hang value; using it may cause a false
pass.

Signed-off-by: Sonny Jiang
Reviewed-by: Lijo Lazar
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 9a1db2bd03e7..8c479669c459 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -133,12 +133,12 @@ int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
 RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);

 amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0));
- amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_write(ring, 0xABADCAFE);
 amdgpu_ring_commit(ring);

 for (i = 0; i < adev->usec_timeout; i++) {
 tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
- if (tmp == 0xDEADBEEF)
+ if (tmp == 0xABADCAFE)
 break;
 udelay(1);
 }

From a3edd1ac706243fe5ca1c0925ce120b5a2661975 Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Wed, 8 Mar 2023 12:41:22 +0530
Subject: [PATCH 476/861] drm/amdgpu/vcn: Use buffer object's deletion logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The VCN DPG buffer object is initialized to NULL. If allocated, the
buffer object deletion logic will take care of the NULL check and
delete it accordingly.
This is useful for cases where indirect sram flag could be manipulated later after buffer allocation. Signed-off-by: Lijo Lazar Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 57dabfe1a1be..06ec2dc55857 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -235,11 +235,11 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << j)) continue; - if (adev->vcn.indirect_sram) { - amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo, - &adev->vcn.inst[j].dpg_sram_gpu_addr, - (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); - } + amdgpu_bo_free_kernel( + &adev->vcn.inst[j].dpg_sram_bo, + &adev->vcn.inst[j].dpg_sram_gpu_addr, + (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); + kvfree(adev->vcn.inst[j].saved_bo); amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, From 672c883c26c68fe49b161d7ceab94bdc69e57b0e Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 20 Feb 2023 12:04:30 +0530 Subject: [PATCH 477/861] drm/amdgpu: Simplify aquavanjram instance mapping Simplify so as to use the same sequence to assign logical to physical ids for all IPs. Signed-off-by: Lijo Lazar Acked-by: Leo Liu Tested-by: James Zhu Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 33 +++++-------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 51d3cb81e37a..68d732dd9ecb 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -101,32 +101,15 @@ static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) { - int xcc_mask, sdma_mask; - int l, i; + u32 ip_map[][2] = { + { GC_HWIP, adev->gfx.xcc_mask }, + { SDMA0_HWIP, adev->sdma.sdma_mask }, + { VCN_HWIP, adev->vcn.inst_mask }, + }; + int i; - /* Map GC instances */ - l = 0; - xcc_mask = adev->gfx.xcc_mask; - while (xcc_mask) { - i = ffs(xcc_mask) - 1; - adev->ip_map.dev_inst[GC_HWIP][l++] = i; - xcc_mask &= ~(1 << i); - } - for (; l < HWIP_MAX_INSTANCE; l++) - adev->ip_map.dev_inst[GC_HWIP][l] = -1; - - l = 0; - sdma_mask = adev->sdma.sdma_mask; - while (sdma_mask) { - i = ffs(sdma_mask) - 1; - adev->ip_map.dev_inst[SDMA0_HWIP][l++] = i; - sdma_mask &= ~(1 << i); - } - for (; l < HWIP_MAX_INSTANCE; l++) - adev->ip_map.dev_inst[SDMA0_HWIP][l] = -1; - - /* This covers both VCN and JPEG, JPEG is only alias of VCN */ - aqua_vanjaram_populate_ip_map(adev, VCN_HWIP, adev->vcn.inst_mask); + for (i = 0; i < ARRAY_SIZE(ip_map); ++i) + aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; } From bc71daff4f147377674e14589fb651bb36f44d4b Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 23 Feb 2023 20:13:56 +0530 Subject: [PATCH 478/861] drm/amdgpu: Use GPU VA space for IH v4.4.2 in APU For IH ring buffer and read/write pointers, use GPU VA space rather than Guest PA on APU configs. Access through Guest PA doesn't work when IOMMU is enabled. It is also beneficial in NUMA configs as it allocates from the closest numa pool in a numa enabled system. 
Signed-off-by: Lijo Lazar Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index 536128447b71..17ccf02462ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -526,6 +526,7 @@ static int vega20_ih_early_init(void *handle) static int vega20_ih_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool use_bus_addr = true; int r; r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, @@ -533,14 +534,18 @@ static int vega20_ih_sw_init(void *handle) if (r) return r; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true); + if ((adev->flags & AMD_IS_APU) && + (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) + use_bus_addr = false; + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); if (r) return r; adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, use_bus_addr); if (r) return r; @@ -559,7 +564,7 @@ static int vega20_ih_sw_init(void *handle) /* initialize ih control registers offset */ vega20_ih_init_register_offset(adev); - r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, use_bus_addr); if (r) return r; From db3b5cb64a9ca301d14ed027e470834316720e42 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 24 Feb 2023 18:01:38 +0530 Subject: [PATCH 479/861] drm/amdgpu: Use apt name for FW reserved region Use the generic term fw_reserved_memory for FW reserve region. This region may also hold discovery TMR in addition to other reserve regions. This region size could be larger than discovery tmr size, hence don't change the discovery tmr size based on this. 
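For intuition, the placement arithmetic used by the reservation code
in the diff below can be worked through stand-alone; the sizes here
are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define SZ_1M (1ULL << 20)
/* round x up to a multiple of a, matching the kernel's ALIGN() */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t mc_vram_size = 16ULL << 30;	/* 16 GiB, illustrative */
	uint64_t reserve_size = 4 * SZ_1M;	/* FW reserve, illustrative */

	/* The FW reserve sits at the top of VRAM; the C2P training block
	 * lands 1 MiB below it, on a 1 MiB boundary. */
	uint64_t fw_reserved_base = mc_vram_size - reserve_size;
	uint64_t c2p_train_data_offset =
		ALIGN_UP(mc_vram_size - reserve_size - SZ_1M, SZ_1M);

	printf("FW reserve:     [0x%llx, 0x%llx)\n",
	       (unsigned long long)fw_reserved_base,
	       (unsigned long long)mc_vram_size);
	printf("C2P train data:  0x%llx\n",
	       (unsigned long long)c2p_train_data_offset);
	return 0;
}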
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 36 +++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3 ++- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index bc11ae56bba5..c8a2d030f226 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1644,14 +1644,15 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) return 0; } -static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev) +static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev, + uint32_t reserve_size) { struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; memset(ctx, 0, sizeof(*ctx)); ctx->c2p_train_data_offset = - ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M); + ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M); ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); ctx->train_data_size = @@ -1669,9 +1670,10 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev) */ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) { - int ret; struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; bool mem_train_support = false; + uint32_t reserve_size = 0; + int ret; if (!amdgpu_sriov_vf(adev)) { if (amdgpu_atomfirmware_mem_training_supported(adev)) @@ -1687,14 +1689,15 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) * Otherwise, fallback to legacy approach to check and reserve tmr block for ip * discovery data and G6 memory training data respectively */ - adev->mman.discovery_tmr_size = - amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); - if (!adev->mman.discovery_tmr_size) - adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET; + if (adev->bios) + reserve_size = + amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); + if (!reserve_size) + reserve_size = DISCOVERY_TMR_OFFSET; if (mem_train_support) { /* reserve vram for mem train according to TMR location */ - amdgpu_ttm_training_data_block_init(adev); + amdgpu_ttm_training_data_block_init(adev, reserve_size); ret = amdgpu_bo_create_kernel_at(adev, ctx->c2p_train_data_offset, ctx->train_data_size, @@ -1709,15 +1712,13 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) } if (!adev->gmc.is_app_apu) { - ret = amdgpu_bo_create_kernel_at(adev, - adev->gmc.real_vram_size - - adev->mman.discovery_tmr_size, - adev->mman.discovery_tmr_size, - &adev->mman.discovery_memory, - NULL); + ret = amdgpu_bo_create_kernel_at( + adev, adev->gmc.real_vram_size - reserve_size, + reserve_size, &adev->mman.fw_reserved_memory, NULL); if (ret) { DRM_ERROR("alloc tmr failed(%d)!\n", ret); - amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, + NULL, NULL); return ret; } } else { @@ -1905,8 +1906,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (!adev->gmc.is_app_apu) { amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); - /* return the IP Discovery TMR memory back to VRAM */ - amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + /* return the FW reserved memory back to VRAM */ + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, + NULL); if (adev->mman.stolen_reserved_size) 
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
 NULL, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e2cd5894afc9..da6544fdc8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -78,7 +78,8 @@ struct amdgpu_mman {
 /* discovery */
 uint8_t *discovery_bin;
 uint32_t discovery_tmr_size;
- struct amdgpu_bo *discovery_memory;
+ /* fw reserved memory */
+ struct amdgpu_bo *fw_reserved_memory;

 /* firmware VRAM reservation */
 u64 fw_vram_usage_start_offset;

From 4bdca2057933ef08a2ca7f44e30a8894ff78c472 Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Wed, 25 Jan 2023 20:04:52 +0530
Subject: [PATCH 480/861] drm/amdgpu: Add utility functions for xcp

Add utility functions to get details of an xcp and iterate through the
available xcps.

Signed-off-by: Lijo Lazar
Reviewed-by: Le Ma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 12 ++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 31 +++++++++++++++++++++++++
 2 files changed, 43 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index e8aa4d6c6b62..337d558a3145 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -256,3 +256,15 @@ int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,

 return id_mask;
 }
+
+int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
+ enum AMDGPU_XCP_IP_BLOCK ip,
+ uint32_t *inst_mask)
+{
+ if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
+ return -EINVAL;
+
+ *inst_mask = xcp->ip[ip].inst_mask;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 1d3dc7d68f54..45d590d7fd95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -108,4 +108,35 @@ int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode);
 int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
 enum AMDGPU_XCP_IP_BLOCK ip, int instance);

+int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
+ enum AMDGPU_XCP_IP_BLOCK ip,
+ uint32_t *inst_mask);
+
+static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ if (!xcp_mgr)
+ return 1;
+ else
+ return xcp_mgr->num_xcps;
+}
+
+static inline struct amdgpu_xcp *
+amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from)
+{
+ if (!xcp_mgr)
+ return NULL;
+
+ while (*from < MAX_XCP) {
+ if (xcp_mgr->xcp[*from].valid)
+ return &xcp_mgr->xcp[*from];
+ ++(*from);
+ }
+
+ return NULL;
+}
+
+#define for_each_xcp(xcp_mgr, xcp, i) \
+ for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
+ ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
+
 #endif

From b6f90baafe267a0705c5d9b1429c875d3c39fbc7 Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Tue, 31 Jan 2023 12:39:49 +0530
Subject: [PATCH 481/861] drm/amdgpu: Move memory partition query to gmc

The GMC block handles memory related information, so it makes more
sense to keep the memory partition functions in the GMC block.
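A stand-alone model of how the for_each_xcp() iterator added above is
meant to be used; the structures are trimmed to the fields the macro
actually touches, so this is an illustration, not the kernel headers:

#include <stdio.h>
#include <stdbool.h>

#define MAX_XCP 8

struct xcp { bool valid; unsigned int inst_mask; };
struct xcp_mgr { int num_xcps; struct xcp xcp[MAX_XCP]; };

/* advance *from past invalid entries, return the next valid xcp */
static struct xcp *get_next_xcp(struct xcp_mgr *mgr, int *from)
{
	if (!mgr)
		return NULL;
	while (*from < MAX_XCP) {
		if (mgr->xcp[*from].valid)
			return &mgr->xcp[*from];
		++(*from);
	}
	return NULL;
}

#define for_each_xcp(mgr, xcp, i)                     \
	for (i = 0, xcp = get_next_xcp(mgr, &i); xcp; \
	     ++i, xcp = get_next_xcp(mgr, &i))

int main(void)
{
	struct xcp_mgr mgr = { .num_xcps = 2 };
	struct xcp *xcp;
	int i;

	mgr.xcp[1] = (struct xcp){ .valid = true, .inst_mask = 0x3 };
	mgr.xcp[4] = (struct xcp){ .valid = true, .inst_mask = 0xc };

	/* visits only the valid partitions, indices 1 and 4 */
	for_each_xcp(&mgr, xcp, i)
		printf("xcp %d: inst_mask 0x%x\n", i, xcp->inst_mask);
	return 0;
}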
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 30 +---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 11 ------- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 44 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 16 +++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 10 ------ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 18 ++++++++++ 6 files changed, 79 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1487ecac2705..2f7a101593e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1204,24 +1204,6 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, return sysfs_emit(buf, "%s\n", partition_mode); } -static ssize_t amdgpu_gfx_get_current_memory_partition(struct device *dev, - struct device_attribute *addr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(ddev); - enum amdgpu_memory_partition mode; - static const char *partition_modes[] = { - "UNKNOWN", "NPS1", "NPS2", "NPS4", "NPS8" - }; - BUILD_BUG_ON(ARRAY_SIZE(partition_modes) <= AMDGPU_NPS8_PARTITION_MODE); - - mode = min((int)adev->gfx.funcs->query_mem_partition_mode(adev), - AMDGPU_NPS8_PARTITION_MODE); - - return sysfs_emit(buf, "%s\n", partition_modes[mode]); -} - static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, struct device_attribute *addr, const char *buf, size_t count) @@ -1305,9 +1287,6 @@ static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR, static DEVICE_ATTR(available_compute_partition, S_IRUGO, amdgpu_gfx_get_available_compute_partition, NULL); -static DEVICE_ATTR(current_memory_partition, S_IRUGO, - amdgpu_gfx_get_current_memory_partition, NULL); - int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) { int r; @@ -1317,19 +1296,12 @@ int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) return r; r = device_create_file(adev->dev, &dev_attr_available_compute_partition); - if (r) - return r; - r = device_create_file(adev->dev, &dev_attr_current_memory_partition); - if (r) - return r; - - return 0; + return r; } void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) { device_remove_file(adev->dev, &dev_attr_current_compute_partition); device_remove_file(adev->dev, &dev_attr_available_compute_partition); - device_remove_file(adev->dev, &dev_attr_current_memory_partition); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 81b4c7e684af..728977f8afe7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -71,14 +71,6 @@ enum amdgpu_pkg_type { AMDGPU_PKG_TYPE_UNKNOWN, }; -enum amdgpu_memory_partition { - UNKNOWN_MEMORY_PARTITION_MODE = 0, - AMDGPU_NPS1_PARTITION_MODE = 1, - AMDGPU_NPS2_PARTITION_MODE = 2, - AMDGPU_NPS4_PARTITION_MODE = 3, - AMDGPU_NPS8_PARTITION_MODE = 4, -}; - struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -276,8 +268,6 @@ struct amdgpu_gfx_funcs { struct amdgpu_gfx_shadow_info *shadow_info); enum amdgpu_gfx_partition (*query_partition_mode)(struct amdgpu_device *adev); - enum amdgpu_memory_partition - (*query_mem_partition_mode)(struct amdgpu_device *adev); int (*switch_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node); @@ -414,7 +404,6 @@ struct amdgpu_gfx { bool 
cp_gfx_shadow; /* for gfx11 */ uint16_t xcc_mask; - enum amdgpu_memory_partition mem_partition_mode; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index b8825a0670a4..d12625f1de5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -891,3 +891,47 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) return 0; } + +static ssize_t current_memory_partition_show( + struct device *dev, struct device_attribute *addr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_memory_partition mode; + + mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + switch (mode) { + case AMDGPU_NPS1_PARTITION_MODE: + return sysfs_emit(buf, "NPS1\n"); + case AMDGPU_NPS2_PARTITION_MODE: + return sysfs_emit(buf, "NPS2\n"); + case AMDGPU_NPS3_PARTITION_MODE: + return sysfs_emit(buf, "NPS3\n"); + case AMDGPU_NPS4_PARTITION_MODE: + return sysfs_emit(buf, "NPS4\n"); + case AMDGPU_NPS6_PARTITION_MODE: + return sysfs_emit(buf, "NPS6\n"); + case AMDGPU_NPS8_PARTITION_MODE: + return sysfs_emit(buf, "NPS8\n"); + default: + return sysfs_emit(buf, "UNKNOWN\n"); + } + + return sysfs_emit(buf, "UNKNOWN\n"); +} + +static DEVICE_ATTR_RO(current_memory_partition); + +int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev) +{ + if (!adev->gmc.gmc_funcs->query_mem_partition_mode) + return 0; + + return device_create_file(adev->dev, + &dev_attr_current_memory_partition); +} + +void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_current_memory_partition); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index e408abfc2daf..2bd3b9665ebf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -63,6 +63,16 @@ struct firmware; +enum amdgpu_memory_partition { + UNKNOWN_MEMORY_PARTITION_MODE = 0, + AMDGPU_NPS1_PARTITION_MODE = 1, + AMDGPU_NPS2_PARTITION_MODE = 2, + AMDGPU_NPS3_PARTITION_MODE = 3, + AMDGPU_NPS4_PARTITION_MODE = 4, + AMDGPU_NPS6_PARTITION_MODE = 6, + AMDGPU_NPS8_PARTITION_MODE = 8, +}; + /* * GMC page fault information */ @@ -140,6 +150,9 @@ struct amdgpu_gmc_funcs { uint64_t *flags); /* get the amount of memory used by the vbios for pre-OS console */ unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); + + enum amdgpu_memory_partition (*query_mem_partition_mode)( + struct amdgpu_device *adev); }; struct amdgpu_xgmi_ras { @@ -375,4 +388,7 @@ uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); int amdgpu_gmc_vram_checking(struct amdgpu_device *adev); +int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev); +void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 69867294117e..81ab3cd2f229 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -607,16 +607,7 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, { soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id)); } -static enum amdgpu_memory_partition -gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev) -{ - 
enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
-
- if (adev->nbio.funcs->get_memory_partition_mode)
- mode = adev->nbio.funcs->get_memory_partition_mode(adev);
-
- return mode;
-}

 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
 int num_xccs_per_xcp)
@@ -660,7 +651,6 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
 .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
 .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
 .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
- .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition,
 .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 245de27c7540..db157a31a780 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1330,6 +1330,17 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
 return size;
 }

+static enum amdgpu_memory_partition
+gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev);
+
+ return mode;
+}
+
 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
 .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
@@ -1339,6 +1350,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 .get_vm_pde = gmc_v9_0_get_vm_pde,
 .get_vm_pte = gmc_v9_0_get_vm_pte,
 .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
+ .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
 };

 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -1901,6 +1913,9 @@ static int gmc_v9_0_sw_init(void *handle)
 if (r)
 return r;

+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ amdgpu_gmc_sysfs_init(adev);
+
 return 0;
 }

@@ -1908,6 +1923,9 @@ static int gmc_v9_0_sw_fini(void *handle)
 {
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ amdgpu_gmc_sysfs_fini(adev);
+
 amdgpu_gmc_ras_fini(adev);
 amdgpu_gem_force_release(adev);
 amdgpu_vm_manager_fini(adev);

From 0f2e1d620eca56c4ceebc041aabb1eda26b2cfd0 Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Fri, 17 Feb 2023 09:32:44 +0530
Subject: [PATCH 482/861] drm/amdgpu: Get supported memory partition modes

Expand the interface to return the supported memory partition modes
along with the current memory partition mode.
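Callers can then decode the returned supp_modes word. A sketch,
assuming the capability register uses the same one-bit-per-mode
encoding that the ffs() decode of the status register in the
nbio_v7_9 diff below implies (bit n-1 set means NPSn supported):

#include <stdio.h>

/* mirrors enum amdgpu_memory_partition: the enum value is the NPS number */
static const int nps_modes[] = { 1, 2, 3, 4, 6, 8 };

int main(void)
{
	unsigned int supp_modes = (1u << 0) | (1u << 3);	/* NPS1 | NPS4 */
	unsigned int i;

	for (i = 0; i < sizeof(nps_modes) / sizeof(nps_modes[0]); ++i)
		if (supp_modes & (1u << (nps_modes[i] - 1)))
			printf("NPS%d supported\n", nps_modes[i]);
	return 0;
}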
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 3 ++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 +++++++++-- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 9 ++++++++- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index eb25ac98903f..095aecfb201e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -97,7 +97,8 @@ struct amdgpu_nbio_funcs { void (*clear_doorbell_interrupt)(struct amdgpu_device *adev); u32 (*get_rom_offset)(struct amdgpu_device *adev); int (*get_compute_partition_mode)(struct amdgpu_device *adev); - u32 (*get_memory_partition_mode)(struct amdgpu_device *adev); + u32 (*get_memory_partition_mode)(struct amdgpu_device *adev, + u32 *supp_modes); void (*set_compute_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index db157a31a780..d6a1dac01952 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1331,16 +1331,23 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) } static enum amdgpu_memory_partition -gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) +gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) { enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; if (adev->nbio.funcs->get_memory_partition_mode) - mode = adev->nbio.funcs->get_memory_partition_mode(adev); + mode = adev->nbio.funcs->get_memory_partition_mode(adev, + supp_modes); return mode; } +static enum amdgpu_memory_partition +gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) +{ + return gmc_v9_0_get_memory_partition(adev, NULL); +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index fa4b423c399b..e1552d645308 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -408,12 +408,19 @@ static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev, WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS, tmp); } -static enum amdgpu_memory_partition nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev) +static enum amdgpu_memory_partition +nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev, u32 *supp_modes) { u32 tmp; + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS); tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE); + if (supp_modes) { + *supp_modes = + RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_CAP); + } + return ffs(tmp); } From 1cc823011a23fa0e3497e9f6655172b2507ce2cd Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 14 Feb 2023 18:33:51 +0530 Subject: [PATCH 483/861] drm/amdgpu: Store additional numa node information Use a struct to store additional numa node information including size and base address. Add numa_info pointer to xcc object to point to the relevant structure based on its proximity domain. 
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 79 ++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 9dbdd699dcea..6a13e9c27550 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -53,10 +54,18 @@ static const guid_t amd_xcc_dsm_guid = GUID_INIT(0x8267f5d5, 0xa556, 0x44f2, #define AMD_XCC_MAX_HID 24 +struct amdgpu_numa_info { + uint64_t size; + int pxm; + int nid; +}; + +struct xarray numa_info_xa; + /* Encapsulates the XCD acpi object information */ struct amdgpu_acpi_xcc_info { struct list_head list; - int mem_node; + struct amdgpu_numa_info *numa_info; uint8_t xcp_node; uint8_t phy_id; acpi_handle handle; @@ -838,6 +847,52 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta return r; } +static inline uint64_t amdgpu_acpi_get_numa_size(int nid) +{ + /* This is directly using si_meminfo_node implementation as the + * function is not exported. + */ + int zone_type; + uint64_t managed_pages = 0; + + pg_data_t *pgdat = NODE_DATA(nid); + + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) + managed_pages += + zone_managed_pages(&pgdat->node_zones[zone_type]); + return managed_pages * PAGE_SIZE; +} + +static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm) +{ + struct amdgpu_numa_info *numa_info; + int nid; + + numa_info = xa_load(&numa_info_xa, pxm); + + if (!numa_info) { + struct sysinfo info; + + numa_info = kzalloc(sizeof *numa_info, GFP_KERNEL); + if (!numa_info) + return NULL; + + nid = pxm_to_node(pxm); + numa_info->pxm = pxm; + numa_info->nid = nid; + + if (numa_info->nid == NUMA_NO_NODE) { + si_meminfo(&info); + numa_info->size = info.totalram * info.mem_unit; + } else { + numa_info->size = amdgpu_acpi_get_numa_size(nid); + } + xa_store(&numa_info_xa, numa_info->pxm, numa_info, GFP_KERNEL); + } + + return numa_info; +} + /** * amdgpu_acpi_get_node_id - obtain the NUMA node id for corresponding amdgpu * acpi device handle @@ -850,18 +905,25 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta * * Returns ACPI STATUS OK with Node ID on success or the corresponding failure reason */ -acpi_status amdgpu_acpi_get_node_id(acpi_handle handle, int *nid) +acpi_status amdgpu_acpi_get_node_id(acpi_handle handle, + struct amdgpu_numa_info **numa_info) { #ifdef CONFIG_ACPI_NUMA u64 pxm; acpi_status status; + if (!numa_info) + return_ACPI_STATUS(AE_ERROR); + status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); if (ACPI_FAILURE(status)) return status; - *nid = pxm_to_node(pxm); + *numa_info = amdgpu_acpi_get_numa_info(pxm); + + if (!*numa_info) + return_ACPI_STATUS(AE_ERROR); return_ACPI_STATUS(AE_OK); #else @@ -1001,7 +1063,8 @@ static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info, ACPI_FREE(obj); obj = NULL; - status = amdgpu_acpi_get_node_id(xcc_info->handle, &xcc_info->mem_node); + status = + amdgpu_acpi_get_node_id(xcc_info->handle, &xcc_info->numa_info); /* TODO: check if this check is required */ if (ACPI_SUCCESS(status)) @@ -1023,6 +1086,7 @@ static int amdgpu_acpi_enumerate_xcc(void) u16 bdf; INIT_LIST_HEAD(&amdgpu_acpi_dev_list); + xa_init(&numa_info_xa); for (id = 0; id < AMD_XCC_MAX_HID; id++) { sprintf(hid, "%s%d", "AMD", AMD_XCC_HID_START + id); 
@@ -1353,6 +1417,13 @@ void amdgpu_acpi_release(void) { struct amdgpu_acpi_dev_info *dev_info, *dev_tmp; struct amdgpu_acpi_xcc_info *xcc_info, *xcc_tmp; + struct amdgpu_numa_info *numa_info; + unsigned long index; + + xa_for_each(&numa_info_xa, index, numa_info) { + kfree(numa_info); + xa_erase(&numa_info_xa, index); + } if (list_empty(&amdgpu_acpi_dev_list)) return; From fa0497c34eb7dd9db9a09963917382e924c3fbc5 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 14 Feb 2023 18:59:40 +0530 Subject: [PATCH 484/861] drm/amdgpu: Add API to get numa information of XCC Add interface to get numa information of ACPI XCC object. The interface uses logical id to identify an XCC. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 14 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 32 +++++++++++++++++++----- 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f4461bc8b1fd..f2bafab15ceb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1383,6 +1383,12 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); /* amdgpu_acpi.c */ +struct amdgpu_numa_info { + uint64_t size; + int pxm; + int nid; +}; + /* ATCS Device/Driver State */ #define AMDGPU_ATCS_PSC_DEV_STATE_D0 0 #define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3 @@ -1402,6 +1408,8 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, u64 *tmr_size); +int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id, + struct amdgpu_numa_info *numa_info); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev); @@ -1414,6 +1422,12 @@ static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, { return -EINVAL; } +static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, + int xcc_id, + struct amdgpu_numa_info *numa_info) +{ + return -EINVAL; +} static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 6a13e9c27550..873532c4adbe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -54,12 +54,6 @@ static const guid_t amd_xcc_dsm_guid = GUID_INIT(0x8267f5d5, 0xa556, 0x44f2, #define AMD_XCC_MAX_HID 24 -struct amdgpu_numa_info { - uint64_t size; - int pxm; - int nid; -}; - struct xarray numa_info_xa; /* Encapsulates the XCD acpi object information */ @@ -1156,6 +1150,32 @@ int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, return 0; } +int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id, + struct amdgpu_numa_info *numa_info) +{ + struct amdgpu_acpi_dev_info *dev_info; + struct amdgpu_acpi_xcc_info *xcc_info; + u16 bdf; + + if (!numa_info) + return -EINVAL; + + bdf = (adev->pdev->bus->number << 8) | adev->pdev->devfn; + dev_info = amdgpu_acpi_get_dev(bdf); + if (!dev_info) + return -ENOENT; + + list_for_each_entry(xcc_info, &dev_info->xcc_list, list) { + if (xcc_info->phy_id == xcc_id) { + memcpy(numa_info, xcc_info->numa_info, + 
sizeof(*numa_info));
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
 /**
  * amdgpu_acpi_event - handle notify events
  *

From 14493cb99b71dbaff58dc0dc0b2cc0a56a88ef05 Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Tue, 14 Feb 2023 14:37:53 +0530
Subject: [PATCH 485/861] drm/amdgpu: Add memory partitions to gmc

Some ASICs have the device memory divided into multiple partitions. The
partitions could be denoted by a numa node or by a range of pages.

Signed-off-by: Lijo Lazar
Reviewed-by: Le Ma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 2bd3b9665ebf..43357d699e6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -178,6 +178,21 @@ struct amdgpu_xgmi {
 	struct amdgpu_xgmi_ras *ras;
 };
 
+struct amdgpu_mem_partition_info {
+	union {
+		struct {
+			uint32_t fpfn;
+			uint32_t lpfn;
+		} range;
+		struct {
+			int node;
+		} numa;
+	};
+	uint64_t size;
+};
+
+#define INVALID_PFN -1
+
 struct amdgpu_gmc {
 	/* FB's physical address in MMIO space (for CPU to
 	 * map FB). This is different compared to the agp/
@@ -266,6 +281,8 @@ struct amdgpu_gmc {
 	bool tmz_enabled;
 	bool is_app_apu;
 
+	struct amdgpu_mem_partition_info *mem_partitions;
+	uint8_t num_mem_partitions;
 	const struct amdgpu_gmc_funcs *gmc_funcs;
 
 	struct amdgpu_xgmi xgmi;

From a433f1f59484fba7a7743a3c5a5f320d9e828b3a Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Tue, 14 Feb 2023 14:45:45 +0530
Subject: [PATCH 486/861] drm/amdgpu: Initialize memory ranges for GC 9.4.3

GC 9.4.3 ASICs may have memory split into multiple partitions. Initialize
the memory partition information for each range. The information may be in
the form of a numa node id or a range of pages.
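Each entry of the new mem_partitions array describes a partition either as a
page-frame range (software-derived ranges) or as a NUMA node (ACPI-derived
ranges on app APUs). A hypothetical dump helper shows the two views of the
union:

static void dump_mem_partition(struct amdgpu_device *adev, int i)
{
	struct amdgpu_mem_partition_info *p = &adev->gmc.mem_partitions[i];

	if (adev->gmc.is_app_apu)
		/* ACPI path: the partition is identified by a NUMA node */
		dev_info(adev->dev, "partition %d: nid %d, size %llu\n",
			 i, p->numa.node, (unsigned long long)p->size);
	else
		/* SW path: the partition is identified by a pfn range */
		dev_info(adev->dev, "partition %d: pfn 0x%x-0x%x, size %llu\n",
			 i, p->range.fpfn, p->range.lpfn,
			 (unsigned long long)p->size);
}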
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 172 ++++++++++++++++++++++++++ 1 file changed, 172 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d6a1dac01952..1653d77df3ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -79,6 +79,7 @@ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 +#define MAX_MEM_RANGES 8 static const char *gfxhub_client_ids[] = { "CB", @@ -1742,6 +1743,169 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev) adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); } +static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev) +{ + enum amdgpu_memory_partition mode; + u32 supp_modes; + bool valid; + + mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); + + /* Mode detected by hardware not present in supported modes */ + if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && + !(BIT(mode - 1) & supp_modes)) + return false; + + switch (mode) { + case UNKNOWN_MEMORY_PARTITION_MODE: + case AMDGPU_NPS1_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 1); + break; + case AMDGPU_NPS2_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 2); + break; + case AMDGPU_NPS4_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 3 || + adev->gmc.num_mem_partitions == 4); + break; + default: + valid = false; + } + + return valid; +} + +static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid) +{ + int i; + + /* Check if node with id 'nid' is present in 'node_ids' array */ + for (i = 0; i < num_ids; ++i) + if (node_ids[i] == nid) + return true; + + return false; +} + +static void +gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges) +{ + int num_ranges = 0, ret, mem_groups; + struct amdgpu_numa_info numa_info; + int node_ids[MAX_MEM_RANGES]; + int num_xcc, xcc_id; + uint32_t xcc_mask; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + xcc_mask = (1U << num_xcc) - 1; + mem_groups = hweight32(adev->aid_mask); + + for_each_inst(xcc_id, xcc_mask) { + ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); + if (ret) + continue; + + if (numa_info.nid == NUMA_NO_NODE) { + mem_ranges[0].size = numa_info.size; + mem_ranges[0].numa.node = numa_info.nid; + num_ranges = 1; + break; + } + + if (gmc_v9_0_is_node_present(node_ids, num_ranges, + numa_info.nid)) + continue; + + node_ids[num_ranges] = numa_info.nid; + mem_ranges[num_ranges].numa.node = numa_info.nid; + mem_ranges[num_ranges].size = numa_info.size; + ++num_ranges; + } + + adev->gmc.num_mem_partitions = num_ranges; + + /* If there is only partition, don't use entire size */ + if (adev->gmc.num_mem_partitions == 1) + mem_ranges[0].size = + (mem_ranges[0].size * (mem_groups - 1) / mem_groups); +} + +static void +gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges) +{ + enum amdgpu_memory_partition mode; + u32 start_addr = 0, size; + int i; + + mode = gmc_v9_0_query_memory_partition(adev); + + switch (mode) { + case UNKNOWN_MEMORY_PARTITION_MODE: + case AMDGPU_NPS1_PARTITION_MODE: + adev->gmc.num_mem_partitions = 1; + break; + case AMDGPU_NPS2_PARTITION_MODE: + adev->gmc.num_mem_partitions = 2; + break; + case AMDGPU_NPS4_PARTITION_MODE: + if (adev->flags & AMD_IS_APU) + adev->gmc.num_mem_partitions = 3; + else 
+ adev->gmc.num_mem_partitions = 4; + break; + default: + adev->gmc.num_mem_partitions = 1; + break; + } + + size = (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) / + adev->gmc.num_mem_partitions; + + for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { + mem_ranges[i].range.fpfn = start_addr; + mem_ranges[i].size = ((u64)size << AMDGPU_GPU_PAGE_SHIFT); + mem_ranges[i].range.lpfn = start_addr + size - 1; + start_addr += size; + } + + /* Adjust the last one */ + mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn = + (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; + mem_ranges[adev->gmc.num_mem_partitions - 1].size = + adev->gmc.real_vram_size - + ((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn + << AMDGPU_GPU_PAGE_SHIFT); +} + +static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) +{ + bool valid; + + adev->gmc.mem_partitions = kzalloc( + MAX_MEM_RANGES * sizeof(struct amdgpu_mem_partition_info), + GFP_KERNEL); + + if (!adev->gmc.mem_partitions) + return -ENOMEM; + + /* TODO : Get the range from PSP/Discovery for dGPU */ + if (adev->gmc.is_app_apu) + gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); + else + gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + + valid = gmc_v9_0_validate_partition_info(adev); + if (!valid) { + /* TODO: handle invalid case */ + dev_WARN(adev->dev, + "Mem ranges not matching with hardware config"); + } + + return 0; +} + static int gmc_v9_0_sw_init(void *handle) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; @@ -1888,6 +2052,12 @@ static int gmc_v9_0_sw_init(void *handle) amdgpu_gmc_get_vbios_allocations(adev); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + r = gmc_v9_0_init_mem_ranges(adev); + if (r) + return r; + } + /* Memory manager */ r = amdgpu_bo_init(adev); if (r) @@ -1932,6 +2102,8 @@ static int gmc_v9_0_sw_fini(void *handle) if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) amdgpu_gmc_sysfs_fini(adev); + adev->gmc.num_mem_partitions = 0; + kfree(adev->gmc.mem_partitions); amdgpu_gmc_ras_fini(adev); amdgpu_gem_force_release(adev); From da539b213d7952741499283636f70406383b9570 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 3 Feb 2023 17:12:10 +0530 Subject: [PATCH 487/861] drm/amdgpu: Add callback to fill xcp memory id Add callback in xcp interface to fill xcp memory id information. Memory id is used to identify the range/partition of an XCP from the available memory partitions in device. Also, fill the id information. 
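The callback is optional, so a consumer is expected to tolerate backends that
do not implement it. A minimal sketch (the wrapper name is illustrative):

static int xcp_resolve_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
			      struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	if (!xcp_mgr->funcs || !xcp_mgr->funcs->get_xcp_mem_id)
		return -EOPNOTSUPP;	/* backend provides no memory ids */

	return xcp_mgr->funcs->get_xcp_mem_id(xcp_mgr, xcp, mem_id);
}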
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 12 ++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 4 +++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 337d558a3145..e1d3727036a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -116,6 +116,7 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, static int __amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps) { struct amdgpu_xcp_ip ip; + uint8_t mem_id; int i, j, ret; for (i = 0; i < MAX_XCP; ++i) @@ -130,6 +131,17 @@ static int __amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps) __amdgpu_xcp_add_block(xcp_mgr, i, &ip); } + + xcp_mgr->xcp[i].id = i; + + if (xcp_mgr->funcs->get_xcp_mem_id) { + ret = xcp_mgr->funcs->get_xcp_mem_id( + xcp_mgr, &xcp_mgr->xcp[i], &mem_id); + if (ret) + continue; + else + xcp_mgr->xcp[i].mem_id = mem_id; + } } xcp_mgr->num_xcps = num_xcps; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 45d590d7fd95..7e7e458d307e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -68,7 +68,7 @@ struct amdgpu_xcp { struct amdgpu_xcp_ip ip[AMDGPU_XCP_MAX_BLOCKS]; uint8_t id; - uint8_t mem_node; + uint8_t mem_id; bool valid; }; @@ -89,6 +89,8 @@ struct amdgpu_xcp_mgr_funcs { int (*get_ip_details)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, enum AMDGPU_XCP_IP_BLOCK ip_id, struct amdgpu_xcp_ip *ip); + int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr, + struct amdgpu_xcp *xcp, uint8_t *mem_id); int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); From 15e3eee8d3939d1f28cd314a5db2590ab94109d6 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 3 Feb 2023 17:14:12 +0530 Subject: [PATCH 488/861] drm/amdgpu: Fill xcp mem node in aquavanjaram Implement callbacks to fill memory node information in aquavanjaram. 
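On native (non-APU) parts the memory id follows directly from the first XCC
of the partition, mirroring __aqua_vanjaram_get_xcp_mem_id in the diff that
follows. A worked example under assumed numbers (8 XCCs in QPX, so
num_xcc_per_xcp = 2):

static uint8_t example_native_mem_id(uint32_t xcc_mask, int num_xcc_per_xcp)
{
	int first_xcc = ffs(xcc_mask) - 1;	/* mask 0b1100 -> xcc_id 2 */

	/* 2 / 2 = 1: this XCP is backed by memory range 1 */
	return first_xcc / num_xcc_per_xcp;
}

On app APUs the id is instead found by matching the XCC's NUMA node against
the initialized memory ranges.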
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 61 ++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 68d732dd9ecb..aa1bb7883158 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -331,6 +331,64 @@ out: return ret; } +static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev, + int xcc_id, uint8_t *mem_id) +{ + /* TODO: Check if any validation is required based on current + * memory/spatial modes + */ + *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp; + + return 0; +} + +static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr, + struct amdgpu_xcp *xcp, uint8_t *mem_id) +{ + struct amdgpu_numa_info numa_info; + struct amdgpu_device *adev; + uint32_t xcc_mask; + int r, i, xcc_id; + + adev = xcp_mgr->adev; + /* TODO: BIOS is not returning the right info now + * Check on this later + */ + /* + if (adev->gmc.gmc_funcs->query_mem_partition_mode) + mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + */ + if (adev->gmc.num_mem_partitions == 1) { + /* Only one range */ + *mem_id = 0; + return 0; + } + + r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask); + if (r || !xcc_mask) + return -EINVAL; + + xcc_id = ffs(xcc_mask) - 1; + if (!adev->gmc.is_app_apu) + return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id); + + r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); + + if (r) + return r; + + r = -EINVAL; + for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { + if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) { + *mem_id = i; + r = 0; + break; + } + } + + return r; +} + int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, enum AMDGPU_XCP_IP_BLOCK ip_id, struct amdgpu_xcp_ip *ip) @@ -344,7 +402,8 @@ int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, .query_partition_mode = &aqua_vanjaram_query_partition_mode, - .get_ip_details = &aqua_vanjaram_get_xcp_ip_details + .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, + .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) From e47947abb9e71176ea2d9c8f55e03134dabd2605 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 3 Feb 2023 18:46:40 +0530 Subject: [PATCH 489/861] drm/amdgpu: Move initialization of xcp before kfd After partition switch, fill all relevant xcp information before kfd starts initialization. 
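Condensed, the ordering this change enforces looks as below. This is a
sketch, not a verbatim excerpt; __aqua_vanjaram_post_partition_switch is a
file-local helper of the aquavanjaram code:

static int example_switch_order(struct amdgpu_xcp_mgr *xcp_mgr, int mode,
				int num_xcps, u32 flags)
{
	/* hardware was already reprogrammed by the gfx partition switch */

	/* publish xcp info for the new layout first ... */
	amdgpu_xcp_init(xcp_mgr, num_xcps, mode);

	/* ... so KFD sees valid xcps when the post-switch step restarts it */
	return __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
}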
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 16 +++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 1 + .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 6 ++++-- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index e1d3727036a1..bca226cc4e0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -113,12 +113,17 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, xcp->valid = true; } -static int __amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps) +int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) { struct amdgpu_xcp_ip ip; uint8_t mem_id; int i, j, ret; + if (!num_xcps || num_xcps > MAX_XCP) + return -EINVAL; + + xcp_mgr->mode = mode; + for (i = 0; i < MAX_XCP; ++i) xcp_mgr->xcp[i].valid = false; @@ -181,13 +186,6 @@ int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode) goto out; } - if (!num_xcps || num_xcps > MAX_XCP) { - ret = -EINVAL; - goto out; - } - - xcp_mgr->mode = mode; - __amdgpu_xcp_init(xcp_mgr, num_xcps); out: mutex_unlock(&xcp_mgr->xcp_lock); @@ -240,7 +238,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, mutex_init(&xcp_mgr->xcp_lock); if (init_mode != AMDGPU_XCP_MODE_NONE) - __amdgpu_xcp_init(xcp_mgr, init_num_xcps); + amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode); adev->xcp_mgr = xcp_mgr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 7e7e458d307e..e1319b887bf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -105,6 +105,7 @@ int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs); +int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode); int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode); int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index aa1bb7883158..004400fb89b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -321,9 +321,11 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, if (adev->nbio.funcs->set_compute_partition_mode) adev->nbio.funcs->set_compute_partition_mode(adev, mode); - ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); - + /* Init info about new xcps */ *num_xcps = num_xcc / num_xcc_per_xcp; + amdgpu_xcp_init(xcp_mgr, *num_xcps, mode); + + ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); unlock: if (flags & AMDGPU_XCP_OPS_KFD) amdgpu_amdkfd_unlock_kfd(adev); From a75f2271a4936265c8a189ab06f9eb89e343b441 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 9 Feb 2023 14:44:13 +0530 Subject: [PATCH 490/861] drm/amdkfd: Add xcp reference to kfd node Fetch xcp information from xcp_mgr and also add xcc_mask to kfd node. 
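When a node has no xcp attached, it claims every XCC through a full mask, as
the diff below does. A small sketch of that fallback (helper name
illustrative):

static uint32_t all_xcc_mask(struct amdgpu_device *adev)
{
	/* e.g. NUM_XCC() == 4 gives (1U << 4) - 1 = 0xF */
	return (1U << NUM_XCC(adev->gfx.xcc_mask)) - 1;
}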
Signed-off-by: Lijo Lazar
Reviewed-by: Le Ma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c | 19 +++++++++++++------
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h   |  3 +++
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 4293cbf9ceb0..647c3313c27e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -594,6 +594,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
 	unsigned int max_proc_per_quantum;
 	int num_xcd, partition_mode;
+	int xcp_idx;
 
 	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
 			KGD_ENGINE_MEC1);
@@ -603,11 +604,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			KGD_ENGINE_SDMA1);
 	kfd->shared_resources = *gpu_resources;
 
-	num_xcd = NUM_XCC(kfd->adev->gfx.xcc_mask);
-	if (num_xcd == 0 || num_xcd == 1 || kfd->adev->gfx.num_xcc_per_xcp == 0)
-		kfd->num_nodes = 1;
-	else
-		kfd->num_nodes = num_xcd / kfd->adev->gfx.num_xcc_per_xcp;
+	kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);
+
 	if (kfd->num_nodes == 0) {
 		dev_err(kfd_device,
 			"KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n",
@@ -735,7 +733,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		kfd->num_nodes);
 
 	/* Allocate the KFD nodes */
-	for (i = 0; i < kfd->num_nodes; i++) {
+	for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
 		node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
 		if (!node)
 			goto node_alloc_error;
@@ -745,6 +743,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		node->kfd = kfd;
 		node->kfd2kgd = kfd->kfd2kgd;
 		node->vm_info.vmid_num_kfd = vmid_num_kfd;
+		node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
+		/* TODO : Check if error handling is needed */
+		if (node->xcp)
+			amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
+						    &node->xcc_mask);
+		else
+			node->xcc_mask =
+				(1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
+
 		node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp);
 		node->start_xcc_id = node->num_xcc_per_node * i;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6e1c15682c28..559ac5efdc26 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -278,6 +278,9 @@ struct kfd_node {
 	unsigned int start_xcc_id; /* Starting XCC instance
 				    * number for the node
 				    */
+	uint32_t xcc_mask; /* Instance mask of XCCs present */
+	struct amdgpu_xcp *xcp;
+
 	/* Interrupts */
 	struct kfifo ih_fifo;
 	struct workqueue_struct *ih_wq;

From c4050ff1a43eec08498b1ed876efc6213592dba0 Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Thu, 9 Feb 2023 16:30:53 +0530
Subject: [PATCH 491/861] drm/amdkfd: Use xcc mask for identifying xcc

Instead of start xcc id and number of xcc per node, use the xcc mask
which is the mask of logical ids of xccs belonging to a partition.
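With the mask in place, per-XCC iteration becomes a mask walk instead of
start-id arithmetic, as the conversions below do. A minimal sketch (the
debug helper is illustrative):

static void kfd_node_dump_xccs(struct kfd_node *node)
{
	uint32_t xcc_mask = node->xcc_mask;	/* copy: the walk consumes it */
	int xcc_id;

	for_each_inst(xcc_id, xcc_mask)
		pr_debug("node %u owns logical XCC %d\n", node->id, xcc_id);
}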
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 9 +- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 86 ++++++++++--------- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 2 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 71 +++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 - drivers/gpu/drm/amd/amdkfd/kfd_process.c | 8 +- .../amd/amdkfd/kfd_process_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 8 +- 8 files changed, 95 insertions(+), 95 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 647c3313c27e..b5497d2ee984 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -745,15 +745,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->vm_info.vmid_num_kfd = vmid_num_kfd; node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx); /* TODO : Check if error handling is needed */ - if (node->xcp) + if (node->xcp) { amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX, &node->xcc_mask); - else + ++xcp_idx; + } else { node->xcc_mask = (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1; - - node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp); - node->start_xcc_id = node->num_xcc_per_node * i; + } if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && partition_mode == AMDGPU_CPX_PARTITION_MODE && diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2b5c4b2dd242..493b4b66f180 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -136,16 +136,14 @@ static void init_sdma_bitmaps(struct device_queue_manager *dqm) void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - int xcc = 0; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id; - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->program_sh_mem_settings( - dqm->dev->adev, qpd->vmid, - qpd->sh_mem_config, - qpd->sh_mem_ape1_base, - qpd->sh_mem_ape1_limit, - qpd->sh_mem_bases, - dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, + qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, + qpd->sh_mem_bases, xcc_id); } static void kfd_hws_hang(struct device_queue_manager *dqm) @@ -427,14 +425,14 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, static void program_trap_handler_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - int xcc = 0; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id; if (dqm->dev->kfd2kgd->program_trap_handler_settings) - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->program_trap_handler_settings( - dqm->dev->adev, qpd->vmid, - qpd->tba_addr, qpd->tma_addr, - dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, qpd->vmid, qpd->tba_addr, + qpd->tma_addr, xcc_id); } static int allocate_vmid(struct device_queue_manager *dqm, @@ -697,7 +695,8 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process struct kfd_process_device *pdd; int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; - int xcc = 0; + uint32_t xcc_mask = dev->xcc_mask; + int xcc_id; reg_sq_cmd.u32All = 0; reg_gfx_index.u32All = 0; @@ -742,11 +741,10 @@ static int dbgdev_wave_reset_wavefronts(struct 
kfd_node *dev, struct kfd_process reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL; reg_sq_cmd.bits.vm_id = vmid; - for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) - dev->kfd2kgd->wave_control_execute(dev->adev, - reg_gfx_index.u32All, - reg_sq_cmd.u32All, - dev->start_xcc_id + xcc); + for_each_inst(xcc_id, xcc_mask) + dev->kfd2kgd->wave_control_execute( + dev->adev, reg_gfx_index.u32All, + reg_sq_cmd.u32All, xcc_id); return 0; } @@ -1258,12 +1256,12 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) { - int xcc = 0, ret; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id, ret; - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { + for_each_inst(xcc_id, xcc_mask) { ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping( - dqm->dev->adev, pasid, vmid, - dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, pasid, vmid, xcc_id); if (ret) break; } @@ -1273,15 +1271,14 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, static void init_interrupts(struct device_queue_manager *dqm) { - unsigned int i, xcc; + uint32_t xcc_mask = dqm->dev->xcc_mask; + unsigned int i, xcc_id; for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { if (is_pipe_enabled(dqm, 0, i)) { - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->init_interrupts( - dqm->dev->adev, i, - dqm->dev->start_xcc_id + - xcc); + dqm->dev->adev, i, xcc_id); } } } @@ -2283,7 +2280,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) get_num_all_sdma_engines(dqm) * dev->kfd->device_info.num_sdma_queues_per_engine + (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * - dqm->dev->num_xcc_per_node); + NUM_XCC(dqm->dev->xcc_mask)); retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), @@ -2489,10 +2486,10 @@ static void seq_reg_dump(struct seq_file *m, int dqm_debugfs_hqds(struct seq_file *m, void *data) { struct device_queue_manager *dqm = data; + uint32_t xcc_mask = dqm->dev->xcc_mask; uint32_t (*dump)[2], n_regs; int pipe, queue; - int r = 0, xcc; - uint32_t inst; + int r = 0, xcc_id; uint32_t sdma_engine_start; if (!dqm->sched_running) { @@ -2500,16 +2497,18 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) return 0; } - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { - inst = dqm->dev->start_xcc_id + xcc; + for_each_inst(xcc_id, xcc_mask) { r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, - KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, - &dump, &n_regs, inst); + KFD_CIK_HIQ_PIPE, + KFD_CIK_HIQ_QUEUE, &dump, + &n_regs, xcc_id); if (!r) { - seq_printf(m, + seq_printf( + m, " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n", - inst, KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, - KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), + xcc_id, + KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1, + KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm), KFD_CIK_HIQ_QUEUE); seq_reg_dump(m, dump, n_regs); @@ -2524,13 +2523,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; - r = dqm->dev->kfd2kgd->hqd_dump( - dqm->dev->adev, pipe, queue, &dump, &n_regs, inst); + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, + pipe, queue, + &dump, &n_regs, + xcc_id); if (r) break; - seq_printf(m, " Inst %d, CP Pipe %d, Queue %d\n", - inst, pipe, queue); + seq_printf(m, + " Inst %d, CP Pipe %d, Queue %d\n", + xcc_id, pipe, queue); seq_reg_dump(m, dump, n_regs); kfree(dump); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index d81125421aaf..863cf060af48 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -77,7 +77,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * - dev->num_xcc_per_node; + NUM_XCC(dev->xcc_mask); mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem + offset); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index c781314b213c..226132ec3714 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -128,7 +128,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev, (ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) * - node->num_xcc_per_node, + NUM_XCC(node->xcc_mask), &(mqd_mem_obj->gtt_mem), &(mqd_mem_obj->gpu_addr), (void *)&(mqd_mem_obj->cpu_ptr), true); @@ -482,7 +482,7 @@ static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd, memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc); init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); @@ -506,21 +506,21 @@ static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - int xcc, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + hiq_mqd_size * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + hiq_mqd_size * inst; err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd, pipe_id, queue_id, - p->doorbell_off, - start_inst+xcc); + p->doorbell_off, xcc_id); if (err) { - pr_debug("Failed to load HIQ MQD for XCC: %d\n", xcc); + pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst); break; } + ++inst; } return err; @@ -530,20 +530,21 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + hiq_mqd_size * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + hiq_mqd_size * inst; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id, start_inst+xcc); + queue_id, xcc_id); if (err) { - pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + pr_debug("Destroy MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -573,7 +574,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++; memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { 
get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc); init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); @@ -600,7 +601,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, m->compute_tg_chunk_size = 1; m->compute_current_logic_xcc_id = (local_xcc_start + xcc) % - mm->dev->num_xcc_per_node; + NUM_XCC(mm->dev->xcc_mask); switch (xcc) { case 0: @@ -633,7 +634,7 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, int xcc = 0; uint64_t size = mm->mqd_stride(mm, q); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { m = get_mqd(mqd + size * xcc); update_mqd(mm, m, q, minfo); @@ -661,24 +662,25 @@ static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; struct v9_mqd *m; uint64_t mqd_offset; - uint32_t start_inst = mm->dev->start_xcc_id; m = get_mqd(mqd); mqd_offset = m->cp_mqd_stride_size; - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + mqd_offset * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + mqd_offset * inst; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id, start_inst+xcc); + queue_id, xcc_id); if (err) { - pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + pr_debug("Destroy MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -690,21 +692,22 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, { /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t mqd_stride_size = mm->mqd_stride(mm, p); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + mqd_stride_size * xcc; - err = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, xcc_mqd, - pipe_id, queue_id, - (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms, start_inst+xcc); + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + mqd_stride_size * inst; + err = mm->dev->kfd2kgd->hqd_load( + mm->dev->adev, xcc_mqd, pipe_id, queue_id, + (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms, + xcc_id); if (err) { - pr_debug("Load MQD failed for xcc: %d\n", xcc); + pr_debug("Load MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -722,7 +725,7 @@ static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd, uint64_t mqd_stride_size = mm->mqd_stride(mm, q); u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0; - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { xcc_mqd = mqd + mqd_stride_size * xcc; xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack + q->ctx_save_restore_area_size * xcc); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 559ac5efdc26..02a90fd7f646 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -274,10 +274,6 @@ struct kfd_node { */ struct kfd_vmid_info vm_info; unsigned int id; /* topology stub index */ - unsigned int num_xcc_per_node; - unsigned int start_xcc_id; /* Starting XCC instance - * number for the node - */ uint32_t xcc_mask; /* Instance mask of XCCs present */ struct amdgpu_xcp *xcp; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index a6ff57f11472..7f7d1378a2f8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -2058,6 +2058,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); struct kfd_node *dev = pdd->dev; + uint32_t xcc_mask = dev->xcc_mask; int xcc = 0; /* @@ -2076,10 +2077,9 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, pdd->qpd.vmid); } else { - for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) - amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev, - pdd->process->pasid, type, - dev->start_xcc_id + xcc); + for_each_inst(xcc, xcc_mask) + amdgpu_amdkfd_flush_gpu_tlb_pasid( + dev->adev, pdd->process->pasid, type, xcc); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 2b2ae0c9902b..a3c23d07c7df 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -946,7 +946,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) seq_printf(m, " Compute queue on device %x\n", q->device->id); mqd_type = KFD_MQD_TYPE_CP; - num_xccs = q->device->num_xcc_per_node; + num_xccs = NUM_XCC(q->device->xcc_mask); break; default: seq_printf(m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index c7072fff778e..d2a42b6b1fa8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -469,7 +469,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, offs, "simd_count", dev->gpu ? (dev->node_props.simd_count * - dev->gpu->num_xcc_per_node) : 0); + NUM_XCC(dev->gpu->xcc_mask)) : 0); sysfs_show_32bit_prop(buffer, offs, "mem_banks_count", dev->node_props.mem_banks_count); sysfs_show_32bit_prop(buffer, offs, "caches_count", @@ -494,7 +494,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.wave_front_size); sysfs_show_32bit_prop(buffer, offs, "array_count", dev->gpu ? (dev->node_props.array_count * - dev->gpu->num_xcc_per_node) : 0); + NUM_XCC(dev->gpu->xcc_mask)) : 0); sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine", dev->node_props.simd_arrays_per_engine); sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array", @@ -558,7 +558,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); sysfs_show_32bit_prop(buffer, offs, "num_xcc", - dev->gpu->num_xcc_per_node); + NUM_XCC(dev->gpu->xcc_mask)); } return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute", @@ -1180,7 +1180,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) buf[4] = gpu->adev->pdev->bus->number; buf[5] = lower_32_bits(local_mem_size); buf[6] = upper_32_bits(local_mem_size); - buf[7] = gpu->start_xcc_id | (gpu->num_xcc_per_node << 16); + buf[7] = (ffs(gpu->xcc_mask) - 1) | (NUM_XCC(gpu->xcc_mask) << 16); for (i = 0, hashout = 0; i < 8; i++) hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH); From 1589c82a10852c6de742e5d6a92042a3fd68d753 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 13 Feb 2023 18:50:07 +0530 Subject: [PATCH 492/861] drm/amdgpu: Check memory ranges for valid xcp mode Check the memory ranges available to the device also for deciding a valid partition mode. Only select combinations are valid for a particular mode. 
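For reference, the combinations the new check accepts, derived from the hunk
that follows:

  compute mode | accepted memory partition counts
  SPX          | 1
  DPX          | any count except 8
  TPX          | 1 or 3
  QPX          | 1 or 4
  CPX          | 1, or one per XCC (num_xcc)

The XCC-count divisibility requirements of each mode still apply on top of
these memory constraints.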
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Reviewed-by: Philip Yang Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 004400fb89b0..7469de3fd6fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -238,21 +238,28 @@ int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, enum amdgpu_gfx_partition mode) { + struct amdgpu_device *adev = xcp_mgr->adev; int num_xcc, num_xccs_per_xcp; - num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: - return num_xcc > 0; + return adev->gmc.num_mem_partitions == 1 && num_xcc > 0; case AMDGPU_DPX_PARTITION_MODE: - return (num_xcc % 4) == 0; + return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0; case AMDGPU_TPX_PARTITION_MODE: - return (num_xcc % 3) == 0; + return (adev->gmc.num_mem_partitions == 1 || + adev->gmc.num_mem_partitions == 3) && + ((num_xcc % 3) == 0); case AMDGPU_QPX_PARTITION_MODE: num_xccs_per_xcp = num_xcc / 4; - return (num_xccs_per_xcp >= 2); + return (adev->gmc.num_mem_partitions == 1 || + adev->gmc.num_mem_partitions == 4) && + (num_xccs_per_xcp >= 2); case AMDGPU_CPX_PARTITION_MODE: - return (num_xcc > 1); + return (num_xcc > 1) && + (adev->gmc.num_mem_partitions == 1 || + adev->gmc.num_mem_partitions == num_xcc); default: return false; } From 570de94b9c5d93e1c5bc4e357946efb93c662da9 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 13 Feb 2023 19:26:18 +0530 Subject: [PATCH 493/861] drm/amdgpu: Add auto mode for compute partition When auto mode is specified, driver will choose the right compute partition mode. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Reviewed-by: Philip Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 ++ .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 28 ++++++++++++++++++- 4 files changed, 35 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f2bafab15ceb..cb9373f8c25a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -242,7 +242,7 @@ extern int amdgpu_num_kcq; extern int amdgpu_vcnfw_log; extern int amdgpu_sg_display; -extern uint amdgpu_user_partt_mode; +extern int amdgpu_user_partt_mode; #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f319318a8813..da4e50aef95a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -193,7 +193,7 @@ int amdgpu_smartshift_bias; int amdgpu_use_xgmi_p2p = 1; int amdgpu_vcnfw_log; int amdgpu_sg_display = -1; /* auto */ -uint amdgpu_user_partt_mode; +int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE; static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -955,8 +955,10 @@ module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); * DOC: partition_mode (int) * Used to override the default SPX mode. 
*/ -MODULE_PARM_DESC(user_partt_mode, - "specify partition mode to be used (0 = AMDGPU_SPX_PARTITION_MODE(default value), \ +MODULE_PARM_DESC( + user_partt_mode, + "specify partition mode to be used (-2 = AMDGPU_AUTO_COMPUTE_PARTITION_MODE(default value) \ + 0 = AMDGPU_SPX_PARTITION_MODE, \ 1 = AMDGPU_DPX_PARTITION_MODE, \ 2 = AMDGPU_TPX_PARTITION_MODE, \ 3 = AMDGPU_QPX_PARTITION_MODE, \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 728977f8afe7..e9c93f6e12b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -62,6 +62,8 @@ enum amdgpu_gfx_partition { AMDGPU_QPX_PARTITION_MODE = 3, AMDGPU_CPX_PARTITION_MODE = 4, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1, + /* Automatically choose the right mode */ + AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2, }; #define NUM_XCC(x) hweight16(x) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 7469de3fd6fe..a165b51e9e58 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -235,6 +235,30 @@ int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, return 0; } +static enum amdgpu_gfx_partition +__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int num_xcc; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + + if (adev->gmc.num_mem_partitions == 1) + return AMDGPU_SPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == num_xcc) + return AMDGPU_CPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == num_xcc / 2) + return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE : + AMDGPU_QPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU)) + return AMDGPU_DPX_PARTITION_MODE; + + return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; +} + static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, enum amdgpu_gfx_partition mode) { @@ -304,7 +328,9 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, adev = xcp_mgr->adev; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) + mode = __aqua_vanjaram_get_auto_mode(xcp_mgr); + else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) return -EINVAL; if (adev->kfd.init_complete) From 6b43e1a05cb764196c5158b2447a9bfad1f2b531 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Mon, 13 Feb 2023 23:51:07 -0500 Subject: [PATCH 494/861] drm/ttm: export ttm_pool_fini for cleanup ttm_pool_init is exported and used outside of ttm subsystem with amdgpu_ttm interface, similarly export ttm_pool_fini for proper cleanup. 
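A minimal sketch of the intended init/fini pairing in a driver that embeds
its own pools; the arguments mirror the amdgpu usage introduced later in
this series, and the function name is illustrative:

static void example_pool_lifecycle(struct device *dev)
{
	struct ttm_pool pool;

	ttm_pool_init(&pool, dev, NUMA_NO_NODE, false, false);
	/* ... allocate and free ttm_tt pages against &pool ... */
	ttm_pool_fini(&pool);	/* now callable from modular drivers too */
}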
Reviewed-by: Felix Kuehling Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_pool.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index afc4476c446f..668abf63f2bd 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -600,6 +600,7 @@ void ttm_pool_fini(struct ttm_pool *pool) */ synchronize_shrinkers(); } +EXPORT_SYMBOL(ttm_pool_fini); /* As long as pages are available make sure to release at least one */ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink, From 1e03322cfef9b83aa87ea0a508588f9f05a47dfc Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Mon, 27 Feb 2023 11:16:09 -0500 Subject: [PATCH 495/861] drm/amdgpu: Set TTM pools for memory partitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For native mode only, create TTM pool for each memory partition to store the NUMA node id, then the TTM pool will be selected using memory partition id to allocate memory from the correct partition. Acked-by: Christian König (rajneesh: changed need_swiotlb and need_dma32 to false for pool init) Reviewed-by: Felix Kuehling Acked-and-tested-by: Mukul Joshi Signed-off-by: Philip Yang Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 61 ++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 + 2 files changed, 60 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c8a2d030f226..7674109810b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -631,6 +631,7 @@ struct amdgpu_ttm_tt { struct task_struct *usertask; uint32_t userflags; bool bound; + int32_t pool_id; }; #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm) @@ -1059,6 +1060,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return NULL; } gtt->gobj = &bo->base; + gtt->pool_id = NUMA_NO_NODE; if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) caching = ttm_write_combined; @@ -1085,6 +1087,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); + struct ttm_pool *pool; pgoff_t i; int ret; @@ -1099,7 +1102,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) return 0; - ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); + if (adev->mman.ttm_pools && gtt->pool_id >= 0) + pool = &adev->mman.ttm_pools[gtt->pool_id]; + else + pool = &adev->mman.bdev.pool; + ret = ttm_pool_alloc(pool, ttm, ctx); if (ret) return ret; @@ -1120,6 +1127,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, { struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); struct amdgpu_device *adev; + struct ttm_pool *pool; pgoff_t i; amdgpu_ttm_backend_unbind(bdev, ttm); @@ -1138,7 +1146,13 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, ttm->pages[i]->mapping = NULL; adev = amdgpu_ttm_adev(bdev); - return ttm_pool_free(&adev->mman.bdev.pool, ttm); + + if (adev->mman.ttm_pools && gtt->pool_id >= 0) + pool = &adev->mman.ttm_pools[gtt->pool_id]; + else + pool = &adev->mman.bdev.pool; + + return ttm_pool_free(pool, ttm); } /** @@ -1728,6 +1742,41 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) return 0; } +static int 
amdgpu_ttm_pools_init(struct amdgpu_device *adev) +{ + int i; + + if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions) + return 0; + + adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions, + sizeof(*adev->mman.ttm_pools), + GFP_KERNEL); + if (!adev->mman.ttm_pools) + return -ENOMEM; + + for (i = 0; i < adev->gmc.num_mem_partitions; i++) { + ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev, + adev->gmc.mem_partitions[i].numa.node, + false, false); + } + return 0; +} + +static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev) +{ + int i; + + if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools) + return; + + for (i = 0; i < adev->gmc.num_mem_partitions; i++) + ttm_pool_fini(&adev->mman.ttm_pools[i]); + + kfree(adev->mman.ttm_pools); + adev->mman.ttm_pools = NULL; +} + /* * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields. @@ -1754,6 +1803,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } + + r = amdgpu_ttm_pools_init(adev); + if (r) { + DRM_ERROR("failed to init ttm pools(%d).\n", r); + return r; + } adev->mman.initialized = true; /* Initialize VRAM pool with all of VRAM divided into pages */ @@ -1901,6 +1956,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (!adev->mman.initialized) return; + amdgpu_ttm_pools_fini(adev); + amdgpu_ttm_training_reserve_vram_fini(adev); /* return the stolen vga memory back to VRAM */ if (!adev->gmc.is_app_apu) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index da6544fdc8dd..8ef048a0a33e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -49,6 +49,7 @@ struct amdgpu_gtt_mgr { struct amdgpu_mman { struct ttm_device bdev; + struct ttm_pool *ttm_pools; bool initialized; void __iomem *aper_base_kaddr; From fcfefd85f18a0004c7c7b499f0701fd2c76d4c68 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Mon, 27 Feb 2023 20:08:29 -0500 Subject: [PATCH 496/861] drm/amdkfd: Native mode memory partition support For native mode, after amdgpu_bo is created on CPU domain, then call amdgpu_ttm_tt_set_mem_pool to select the TTM pool using bo->mem_id. ttm_bo_validate will allocate the memory to the correct memory partition before mapping to GPUs. 
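Condensed, the allocation flow this enables is sketched below; mem_id is
hardcoded to 0 in this patch until the mem_id plumbing lands, per the Fixme
in the diff that follows:

static int example_partition_alloc_flow(struct amdgpu_bo *bo, int mem_id)
{
	int ret;

	/* The bo was created in the CPU domain and its ttm_tt is not yet
	 * populated. Record the partition pool first; ttm_bo_validate()
	 * will then populate the pages from that pool.
	 */
	ret = amdgpu_ttm_tt_set_mem_pool(&bo->tbo, mem_id);

	return ret;
}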
Reviewed-by: Felix Kuehling
Acked-and-tested-by: Mukul Joshi
Signed-off-by: Philip Yang
Signed-off-by: Rajneesh Bhardwaj
Signed-off-by: Alex Deucher
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  7 +++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       | 18 ++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h       |  1 +
 3 files changed, 26 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index bbdd5e3aa18e..59404b3e6b87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1643,6 +1643,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	uint64_t aligned_size;
 	u64 alloc_flags;
 	int ret;
+	int mem_id = 0; /* Fixme : to be changed when mem_id support patch lands, until then NPS1, SPX only */
 
 	/*
 	 * Check on which domain to allocate BO
@@ -1750,6 +1751,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	    ((*mem)->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) {
 		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+		ret = amdgpu_ttm_tt_set_mem_pool(&bo->tbo, mem_id);
+		if (ret) {
+			pr_debug("failed to set ttm mem pool %d\n", ret);
+			goto err_set_mem_partition;
+		}
 	}
 
 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
@@ -1778,6 +1784,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 allocate_init_user_pages_failed:
 err_pin_bo:
 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+err_set_mem_partition:
 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
 	/* Don't unreserve system mem limit twice */

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7674109810b0..3933432daaac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1155,6 +1155,24 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
 	return ttm_pool_free(pool, ttm);
 }
 
+/**
+ * amdgpu_ttm_tt_set_mem_pool - Set the TTM memory pool for the TTM BO
+ * @tbo: The ttm_buffer_object that backs the VRAM bo
+ * @mem_id: to select the initialized ttm pool corresponding to the memory partition
+ */
+int amdgpu_ttm_tt_set_mem_pool(struct ttm_buffer_object *tbo, int mem_id)
+{
+	struct ttm_tt *ttm = tbo->ttm;
+	struct amdgpu_ttm_tt *gtt;
+
+	if (!ttm && !ttm_tt_is_populated(ttm))
+		return -EINVAL;
+
+	gtt = ttm_to_amdgpu_ttm_tt(ttm);
+	gtt->pool_id = mem_id;
+	return 0;
+}
+
 /**
  * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
  * task

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 8ef048a0a33e..fe32de1bf4d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -192,6 +192,7 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 				  unsigned long end, unsigned long *userptr);
+int amdgpu_ttm_tt_set_mem_pool(struct ttm_buffer_object *tbo, int mem_id);
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated);
 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);

From e181be58ccc2ac48e4b79996c8dd6dd9f34fa4b5 Mon Sep 17 00:00:00 2001
From: Rajneesh Bhardwaj
Date: Mon, 27 Feb 2023 13:17:14 -0500
Subject: [PATCH 497/861] drm/amdgpu: Fix xGMI access P2P mapping failure on GFXIP 9.4.3

On GFXIP 9.4.3, we don't need to rely on xGMI hive info to determine P2P
access.
Reviewed-by: Felix Kuehling Acked-and-tested-by: Mukul Joshi Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 59404b3e6b87..c4b949d17e14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -814,7 +814,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, * if peer device has large BAR. In contrast, access over xGMI is * allowed for both small and large BAR configurations of peer device */ - if ((adev != bo_adev) && + if ((adev != bo_adev && !adev->gmc.is_app_apu) && ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { From a0ba127960982b8827ba8b410c272ec8f3ee7e6a Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 3 Mar 2023 18:03:00 +0530 Subject: [PATCH 498/861] drm/amdgpu: Fix unmapping of aperture When aperture size is zero, there is no mapping done. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 ++++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7f62826fcaca..19a3f9ba545b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4121,7 +4121,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) adev->mman.aper_base_kaddr = NULL; /* Memory manager related */ - if (!adev->gmc.xgmi.connected_to_cpu) { + if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { arch_phys_wc_del(adev->gmc.vram_mtrr); arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index f431205e1077..725530eb4e0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1080,8 +1080,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev) amdgpu_ttm_fini(adev); if (drm_dev_enter(adev_to_drm(adev), &idx)) { - - if (!adev->gmc.xgmi.connected_to_cpu) { + if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { arch_phys_wc_del(adev->gmc.vram_mtrr); arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3933432daaac..09d1a98bd11e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1844,12 +1844,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base, adev->gmc.visible_vram_size); - else if (!adev->gmc.is_app_apu) + else if (adev->gmc.is_app_apu) + DRM_DEBUG_DRIVER( + "No need to ioremap when real vram size is 0\n"); + else #endif adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, adev->gmc.visible_vram_size); - else - DRM_DEBUG_DRIVER("No need to ioremap when real vram size is 0\n"); #endif /* From f9632096be49ed31e86541e3e79960e21e8f1578 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 7 Mar 2023 10:33:05 +0530 Subject: 
[PATCH 499/861] drm/amdgpu: Add compute mode descriptor function Keep a helper function to get description of compute partition mode. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 24 +----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 2f7a101593e7..6098b8b1ae5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1175,33 +1175,11 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); int mode; - char *partition_mode; mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_NONE); - switch (mode) { - case AMDGPU_SPX_PARTITION_MODE: - partition_mode = "SPX"; - break; - case AMDGPU_DPX_PARTITION_MODE: - partition_mode = "DPX"; - break; - case AMDGPU_TPX_PARTITION_MODE: - partition_mode = "TPX"; - break; - case AMDGPU_QPX_PARTITION_MODE: - partition_mode = "QPX"; - break; - case AMDGPU_CPX_PARTITION_MODE: - partition_mode = "CPX"; - break; - default: - partition_mode = "UNKNOWN"; - break; - } - - return sysfs_emit(buf, "%s\n", partition_mode); + return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode)); } static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index e9c93f6e12b8..3d11b7a0bd75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -494,4 +494,25 @@ int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev); + +static inline const char *amdgpu_gfx_compute_mode_desc(int mode) +{ + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + return "SPX"; + case AMDGPU_DPX_PARTITION_MODE: + return "DPX"; + case AMDGPU_TPX_PARTITION_MODE: + return "TPX"; + case AMDGPU_QPX_PARTITION_MODE: + return "QPX"; + case AMDGPU_CPX_PARTITION_MODE: + return "CPX"; + default: + return "UNKNOWN"; + } + + return "UNKNOWN"; +} + #endif From b6b85c8b43a85988ecd06f039f8f90c041842812 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 7 Mar 2023 10:36:08 +0530 Subject: [PATCH 500/861] drm/amdgpu: Return error on invalid compute mode Return error if an invalid compute partition mode is requested. 
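
For callers, the visible change is that a bad mode now propagates as an
error instead of being silently ignored. A minimal caller sketch,
assuming a sysfs-style store path (hypothetical helper, not from the
patch):

    static ssize_t store_partition_mode(struct amdgpu_device *adev, int mode,
    				    size_t count)
    {
    	int r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);

    	/* -EINVAL is now returned for invalid/unsupported modes */
    	return r ? r : count;
    }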
Signed-off-by: Lijo Lazar
Reviewed-by: Le Ma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 8 ++++++--
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c             | 6 +++++-
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
index a165b51e9e58..848049db00ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
@@ -328,10 +328,14 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 	adev = xcp_mgr->adev;
 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

-	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE)
+	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
 		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
-	else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
+	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
+		dev_err(adev->dev,
+			"Invalid compute partition mode requested: %s, available memory partitions: %d",
+			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
 		return -EINVAL;
+	}

 	if (adev->kfd.init_complete)
 		flags |= AMDGPU_XCP_OPS_KFD;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 81ab3cd2f229..d0ddcd751432 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1933,7 +1933,11 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
 	if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
 					    AMDGPU_XCP_FL_NONE) ==
 	    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
-		amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode);
+		r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
+						     amdgpu_user_partt_mode);
+
+	if (r)
+		return r;

 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
 	for (i = 0; i < num_xcc; i++) {

From ba08e9cb6ff87acdb2f28f013fe695a252533f0e Mon Sep 17 00:00:00 2001
From: Lijo Lazar
Date: Fri, 10 Mar 2023 15:38:19 +0530
Subject: [PATCH 501/861] drm/amdgpu: Add PSP spatial partition interface

Add PSP ring command interface for spatial partitioning.
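
A minimal usage sketch for the new interface (an assumption about how a
mode-switch path would call it, not part of the patch):

    static int request_spatial_partition(struct amdgpu_device *adev, int mode)
    {
    	int r;

    	/* ask the PSP to reconfigure spatial partitioning; on SRIOV
    	 * VFs psp_spatial_partition() is a no-op and returns 0
    	 */
    	r = psp_spatial_partition(&adev->psp, mode);
    	if (r)
    		dev_err(adev->dev, "PSP spatial partition request failed (%d)\n", r);

    	return r;
    }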
Signed-off-by: Lijo Lazar
Reviewed-by: Hawking Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 21 +++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h |  2 ++
 drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h |  9 +++++++++
 3 files changed, 32 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index ea47012795e7..d62746b596f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -991,6 +991,27 @@ static int psp_rl_load(struct amdgpu_device *adev)
 	return ret;
 }

+int psp_spatial_partition(struct psp_context *psp, int mode)
+{
+	struct psp_gfx_cmd_resp *cmd;
+	int ret;
+
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	cmd = acquire_psp_cmd_buf(psp);
+
+	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
+	cmd->cmd.cmd_spatial_part.mode = mode;
+
+	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	release_psp_cmd_buf(psp);
+
+	return ret;
+}
+
 static int psp_asd_initialize(struct psp_context *psp)
 {
 	int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index cf4f60c66122..0a409da749d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -519,6 +519,8 @@ int psp_load_fw_list(struct psp_context *psp,
 		     struct amdgpu_firmware_info **ucode_list, int ucode_count);
 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);

+int psp_spatial_partition(struct psp_context *psp, int mode);
+
 int is_psp_fw_valid(struct psp_bin_desc bin);

 int amdgpu_psp_sysfs_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 22c775f39119..18917df785ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -102,6 +102,7 @@ enum psp_gfx_cmd_id
     GFX_CMD_ID_LOAD_TOC = 0x00000020, /* Load TOC and obtain TMR size */
     GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */
     GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */
+    GFX_CMD_ID_SRIOV_SPATIAL_PART = 0x00000027, /* Configure spatial partitioning mode */
 };

 /* PSP boot config sub-commands */
@@ -338,6 +339,13 @@ struct psp_gfx_cmd_boot_cfg
     uint32_t boot_config_valid; /* dynamic boot configuration valid bits bitmask */
 };

+struct psp_gfx_cmd_sriov_spatial_part {
+	uint32_t mode;
+	uint32_t override_ips;
+	uint32_t override_xcds_avail;
+	uint32_t override_this_aid;
+};
+
 /* All GFX ring buffer commands. */
 union psp_gfx_commands
 {
@@ -351,6 +359,7 @@ union psp_gfx_commands
     struct psp_gfx_cmd_setup_tmr cmd_setup_vmr;
     struct psp_gfx_cmd_load_toc cmd_load_toc;
     struct psp_gfx_cmd_boot_cfg boot_cfg;
+    struct psp_gfx_cmd_sriov_spatial_part cmd_spatial_part;
 };

 struct psp_gfx_uresp_reserved

From 63630c9e5c3481c51ac3b4ee058628a01ac91ba8 Mon Sep 17 00:00:00 2001
From: Gavin Wan
Date: Thu, 16 Mar 2023 13:44:41 -0400
Subject: [PATCH 502/861] drm/amdgpu: Add PSP 13.0.6 SRIOV ucode init support

Add support for PSP 13.0.6 SRIOV ucode init.
Signed-off-by: Gavin Wan
Reviewed-by: Yang Wang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index d62746b596f5..ec79a5c2f500 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -146,6 +146,9 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
 	case IP_VERSION(13, 0, 0):
 		adev->virt.autoload_ucode_id = 0;
 		break;
+	case IP_VERSION(13, 0, 6):
+		ret = psp_init_cap_microcode(psp, ucode_prefix);
+		break;
 	case IP_VERSION(13, 0, 10):
 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
 		ret = psp_init_cap_microcode(psp, ucode_prefix);

From b0a3bbf947f6ed690336cec1f6cde2a30d082dbb Mon Sep 17 00:00:00 2001
From: Gavin Wan
Date: Mon, 3 Apr 2023 17:49:41 -0400
Subject: [PATCH 503/861] drm/amdgpu: Skip using MC FB Offset when APU flag is
 set for SRIOV.

The MC_VM_FB_OFFSET is a PF-only register; it cannot be read on a VF.
So the driver should not use the MC_VM_FB_OFFSET address to set
adev->gmc.aper_base.

Signed-off-by: Gavin Wan
Reviewed-by: Zhigang Luo
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 1653d77df3ba..58bcd1e1f1b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1644,7 +1644,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	 */

 	/* check whether both host-gpu and gpu-gpu xgmi links exist */
-	if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
+	if ((!amdgpu_sriov_vf(adev) &&
+	     (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
 	    (adev->gmc.xgmi.supported &&
 	     adev->gmc.xgmi.connected_to_cpu)) {
 		adev->gmc.aper_base =

From 46f7b4deb30558593c1d2e62d561a3cee21f558a Mon Sep 17 00:00:00 2001
From: Gavin Wan
Date: Mon, 10 Apr 2023 15:04:26 -0400
Subject: [PATCH 504/861] drm/amdgpu: Set memory partitions to 1 for SRIOV.

For SRIOV, the memory partitions are set by the host driver. Each VF
has only one memory partition. We need to set the number of memory
partitions to 1 in the guest driver for SRIOV.
V2: squash in fix ("drm/amdgpu: Fix memory range info of GC 9.4.3 VFs")

Signed-off-by: Gavin Wan
Acked-by: Zhigang Luo
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 58bcd1e1f1b6..95c3253e240a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1346,6 +1346,9 @@ gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
 static enum amdgpu_memory_partition
 gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
 {
+	if (amdgpu_sriov_vf(adev))
+		return AMDGPU_NPS1_PARTITION_MODE;
+
 	return gmc_v9_0_get_memory_partition(adev, NULL);
 }

@@ -1897,7 +1900,10 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
 	else
 		gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);

-	valid = gmc_v9_0_validate_partition_info(adev);
+	if (amdgpu_sriov_vf(adev))
+		valid = true;
+	else
+		valid = gmc_v9_0_validate_partition_info(adev);
 	if (!valid) {
 		/* TODO: handle invalid case */
 		dev_WARN(adev->dev,

From b4520bfd801338c48d887f0eec74a53ab80f2167 Mon Sep 17 00:00:00 2001
From: Gavin Wan
Date: Fri, 17 Mar 2023 18:42:30 -0400
Subject: [PATCH 505/861] drm/amdgpu: Check if the pointer is NULL before
 using it.

For SRIOV on some parts, the host driver does not post the VBIOS. So
the guest cannot get BIOS information. Therefore,
adev->virt.fw_reserve.p_pf2vf and adev->mode_info.atom_context are NULL.

Signed-off-by: Gavin Wan
Reviewed-by: Zhigang Luo
Acked-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 19a3f9ba545b..0c6f983fb2ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3855,21 +3855,24 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	}

 	/* enable PCIE atomic ops */
-	if (amdgpu_sriov_vf(adev))
-		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
-			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
-			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+	if (amdgpu_sriov_vf(adev)) {
+		if (adev->virt.fw_reserve.p_pf2vf)
+			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+				adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
+				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 	/* APUs w/ gfx9 onwards don't rely on PCIe atomics; rather, an
 	 * internal path natively supports atomics, so set have_atomics_support to true.
*/ - else if ((adev->flags & AMD_IS_APU) && - (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) + } else if ((adev->flags & AMD_IS_APU) && + (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) { adev->have_atomics_support = true; - else + } else { adev->have_atomics_support = !pci_enable_atomic_ops_to_root(adev->pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); + } + if (!adev->have_atomics_support) dev_info(adev->dev, "PCIE atomic ops is not supported\n"); @@ -3885,7 +3888,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_reset_init(adev); /* detect if we are with an SRIOV vbios */ - amdgpu_device_detect_sriov_bios(adev); + if (adev->bios) + amdgpu_device_detect_sriov_bios(adev); /* check if we need to reset the asic * E.g., driver was not cleanly unloaded previously, etc. From 6c2bebfca43c14967cfb9cf6c2c074b0d755ddd7 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 1 Mar 2023 10:05:17 +0800 Subject: [PATCH 506/861] drm/amdgpu: Add vcn/jpeg ras err status registers Add new ras error status registers introduced in vcn v4_0_3 to log vcn and jpeg ras error. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- .../include/asic_reg/vcn/vcn_4_0_3_offset.h | 78 +++ .../include/asic_reg/vcn/vcn_4_0_3_sh_mask.h | 495 ++++++++++++++++++ 2 files changed, 573 insertions(+) diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h index facad93cd06f..e9742d10de1c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h @@ -1500,6 +1500,84 @@ #define regVCN_RAS_CNTL_MMSCH 0x0914 #define regVCN_RAS_CNTL_MMSCH_BASE_IDX 1 +// addressBlock: aid_uvd0_vcn_edcc_dec +// base address: 0x21d20 +#define regVCN_UE_ERR_STATUS_LO_VIDD 0x094c +#define regVCN_UE_ERR_STATUS_LO_VIDD_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_VIDD 0x094d +#define regVCN_UE_ERR_STATUS_HI_VIDD_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_VIDV 0x094e +#define regVCN_UE_ERR_STATUS_LO_VIDV_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_VIDV 0x094f +#define regVCN_UE_ERR_STATUS_HI_VIDV_BASE_IDX 1 +#define regVCN_CE_ERR_STATUS_LO_MMSCHD 0x0950 +#define regVCN_CE_ERR_STATUS_LO_MMSCHD_BASE_IDX 1 +#define regVCN_CE_ERR_STATUS_HI_MMSCHD 0x0951 +#define regVCN_CE_ERR_STATUS_HI_MMSCHD_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG0S 0x0952 +#define regVCN_UE_ERR_STATUS_LO_JPEG0S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG0S 0x0953 +#define regVCN_UE_ERR_STATUS_HI_JPEG0S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG0D 0x0954 +#define regVCN_UE_ERR_STATUS_LO_JPEG0D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG0D 0x0955 +#define regVCN_UE_ERR_STATUS_HI_JPEG0D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG1S 0x0956 +#define regVCN_UE_ERR_STATUS_LO_JPEG1S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG1S 0x0957 +#define regVCN_UE_ERR_STATUS_HI_JPEG1S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG1D 0x0958 +#define regVCN_UE_ERR_STATUS_LO_JPEG1D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG1D 0x0959 +#define regVCN_UE_ERR_STATUS_HI_JPEG1D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG2S 0x095a +#define regVCN_UE_ERR_STATUS_LO_JPEG2S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG2S 0x095b +#define regVCN_UE_ERR_STATUS_HI_JPEG2S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG2D 0x095c +#define regVCN_UE_ERR_STATUS_LO_JPEG2D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG2D 0x095d +#define 
regVCN_UE_ERR_STATUS_HI_JPEG2D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG3S 0x095e +#define regVCN_UE_ERR_STATUS_LO_JPEG3S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG3S 0x095f +#define regVCN_UE_ERR_STATUS_HI_JPEG3S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG3D 0x0960 +#define regVCN_UE_ERR_STATUS_LO_JPEG3D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG3D 0x0961 +#define regVCN_UE_ERR_STATUS_HI_JPEG3D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG4S 0x0962 +#define regVCN_UE_ERR_STATUS_LO_JPEG4S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG4S 0x0963 +#define regVCN_UE_ERR_STATUS_HI_JPEG4S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG4D 0x0964 +#define regVCN_UE_ERR_STATUS_LO_JPEG4D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG4D 0x0965 +#define regVCN_UE_ERR_STATUS_HI_JPEG4D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG5S 0x0966 +#define regVCN_UE_ERR_STATUS_LO_JPEG5S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG5S 0x0967 +#define regVCN_UE_ERR_STATUS_HI_JPEG5S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG5D 0x0968 +#define regVCN_UE_ERR_STATUS_LO_JPEG5D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG5D 0x0969 +#define regVCN_UE_ERR_STATUS_HI_JPEG5D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG6S 0x096a +#define regVCN_UE_ERR_STATUS_LO_JPEG6S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG6S 0x096b +#define regVCN_UE_ERR_STATUS_HI_JPEG6S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG6D 0x096c +#define regVCN_UE_ERR_STATUS_LO_JPEG6D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG6D 0x096d +#define regVCN_UE_ERR_STATUS_HI_JPEG6D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG7S 0x096e +#define regVCN_UE_ERR_STATUS_LO_JPEG7S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG7S 0x096f +#define regVCN_UE_ERR_STATUS_HI_JPEG7S_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_LO_JPEG7D 0x0970 +#define regVCN_UE_ERR_STATUS_LO_JPEG7D_BASE_IDX 1 +#define regVCN_UE_ERR_STATUS_HI_JPEG7D 0x0971 +#define regVCN_UE_ERR_STATUS_HI_JPEG7D_BASE_IDX 1 // addressBlock: aid_uvd0_uvd_jrbc1_uvd_jrbc_dec // base address: 0x1e000 diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h index be643ea0e569..5bd8111bf04a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h @@ -5312,6 +5312,501 @@ #define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET_MASK 0x00000001L #define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS_MASK 0x00010000L +// addressBlock: aid_uvd0_vcn_edcc_dec +//VCN_UE_ERR_STATUS_LO_VIDD +#define VCN_UE_ERR_STATUS_LO_VIDD__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_VIDD__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_VIDD__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_VIDD__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_VIDD__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_VIDD__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_VIDD__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_VIDD__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_VIDD +#define VCN_UE_ERR_STATUS_HI_VIDD__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_VIDD__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_VIDD__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_VIDD__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_VIDD__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_VIDD__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_VIDD__RESERVED__SHIFT 0x1d +#define 
VCN_UE_ERR_STATUS_HI_VIDD__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_VIDD__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_VIDD__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_VIDD__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_VIDD__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_VIDD__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_VIDD__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_VIDD__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_VIDD__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_VIDV +#define VCN_UE_ERR_STATUS_LO_VIDV__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_VIDV__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_VIDV__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_VIDV__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_VIDV__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_VIDV__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_VIDV__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_VIDV__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_VIDV +#define VCN_UE_ERR_STATUS_HI_VIDV__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_VIDV__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_VIDV__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_VIDV__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_VIDV__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_VIDV__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_VIDV__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_VIDV__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_VIDV__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_VIDV__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_VIDV__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_VIDV__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_VIDV__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_VIDV__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_VIDV__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_VIDV__Err_clr_MASK 0x80000000L +//VCN_CE_ERR_STATUS_LO_MMSCHD +#define VCN_CE_ERR_STATUS_LO_MMSCHD__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_CE_ERR_STATUS_LO_MMSCHD__Address_Valid_Flag__SHIFT 0x1 +#define VCN_CE_ERR_STATUS_LO_MMSCHD__Address__SHIFT 0x2 +#define VCN_CE_ERR_STATUS_LO_MMSCHD__Memory_id__SHIFT 0x18 +#define VCN_CE_ERR_STATUS_LO_MMSCHD__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_CE_ERR_STATUS_LO_MMSCHD__Address_Valid_Flag_MASK 0x00000002L +#define VCN_CE_ERR_STATUS_LO_MMSCHD__Address_MASK 0x00FFFFFCL +#define VCN_CE_ERR_STATUS_LO_MMSCHD__Memory_id_MASK 0xFF000000L +//VCN_CE_ERR_STATUS_HI_MMSCHD +#define VCN_CE_ERR_STATUS_HI_MMSCHD__ECC__SHIFT 0x0 +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Other__SHIFT 0x1 +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_Info__SHIFT 0x3 +#define VCN_CE_ERR_STATUS_HI_MMSCHD__CE_Cnt__SHIFT 0x17 +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Poison__SHIFT 0x1c +#define VCN_CE_ERR_STATUS_HI_MMSCHD__RESERVED__SHIFT 0x1d +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_clr__SHIFT 0x1f +#define VCN_CE_ERR_STATUS_HI_MMSCHD__ECC_MASK 0x00000001L +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Other_MASK 0x00000002L +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_Info_MASK 0x007FFFF8L +#define VCN_CE_ERR_STATUS_HI_MMSCHD__CE_Cnt_MASK 0x03800000L +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Poison_MASK 0x10000000L +#define 
VCN_CE_ERR_STATUS_HI_MMSCHD__RESERVED_MASK 0x60000000L +#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG0S +#define VCN_UE_ERR_STATUS_LO_JPEG0S__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG0S__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG0S__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG0S__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG0S__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG0S__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG0S__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG0S__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG0S +#define VCN_UE_ERR_STATUS_HI_JPEG0S__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG0S__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG0S__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG0S__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG0S__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG0S__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG0S__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG0S__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG0S__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG0S__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG0D +#define VCN_UE_ERR_STATUS_LO_JPEG0D__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG0D__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG0D__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG0D__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG0D__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG0D__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG0D__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG0D__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG0D +#define VCN_UE_ERR_STATUS_HI_JPEG0D__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG0D__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG0D__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG0D__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG0D__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG0D__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG0D__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG0D__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG0D__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG0D__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG1S +#define VCN_UE_ERR_STATUS_LO_JPEG1S__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG1S__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG1S__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG1S__Memory_id__SHIFT 
0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG1S__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG1S__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG1S__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG1S__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG1S +#define VCN_UE_ERR_STATUS_HI_JPEG1S__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG1S__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG1S__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG1S__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG1S__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG1S__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG1S__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG1S__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG1S__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG1S__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG1D +#define VCN_UE_ERR_STATUS_LO_JPEG1D__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG1D__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG1D__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG1D__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG1D__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG1D__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG1D__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG1D__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG1D +#define VCN_UE_ERR_STATUS_HI_JPEG1D__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG1D__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG1D__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG1D__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG1D__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG1D__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG1D__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG1D__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG1D__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG1D__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG2S +#define VCN_UE_ERR_STATUS_LO_JPEG2S__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG2S__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG2S__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG2S__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG2S__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG2S__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG2S__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG2S__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG2S +#define VCN_UE_ERR_STATUS_HI_JPEG2S__ECC__SHIFT 0x0 +#define 
VCN_UE_ERR_STATUS_HI_JPEG2S__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG2S__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG2S__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG2S__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG2S__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG2S__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG2S__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG2S__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG2S__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG2D +#define VCN_UE_ERR_STATUS_LO_JPEG2D__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG2D__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG2D__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG2D__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG2D__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG2D__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG2D__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG2D__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG2D +#define VCN_UE_ERR_STATUS_HI_JPEG2D__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG2D__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG2D__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG2D__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG2D__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG2D__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG2D__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG2D__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG2D__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG2D__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG3S +#define VCN_UE_ERR_STATUS_LO_JPEG3S__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG3S__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG3S__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG3S__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG3S__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG3S__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG3S__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG3S__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG3S +#define VCN_UE_ERR_STATUS_HI_JPEG3S__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG3S__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG3S__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG3S__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG3S__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_clr__SHIFT 
0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG3S__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG3S__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG3S__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG3S__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG3S__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG3D +#define VCN_UE_ERR_STATUS_LO_JPEG3D__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG3D__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG3D__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG3D__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG3D__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG3D__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG3D__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG3D__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG3D +#define VCN_UE_ERR_STATUS_HI_JPEG3D__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG3D__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG3D__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG3D__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG3D__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG3D__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG3D__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG3D__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG3D__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG3D__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG4S +#define VCN_UE_ERR_STATUS_LO_JPEG4S__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG4S__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG4S__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG4S__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG4S__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG4S__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG4S__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG4S__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG4S +#define VCN_UE_ERR_STATUS_HI_JPEG4S__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG4S__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG4S__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG4S__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG4S__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG4S__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG4S__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG4S__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG4S__FED_Cnt_MASK 0x1C000000L +#define 
VCN_UE_ERR_STATUS_HI_JPEG4S__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG4D +#define VCN_UE_ERR_STATUS_LO_JPEG4D__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG4D__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG4D__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG4D__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG4D__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG4D__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG4D__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG4D__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG4D +#define VCN_UE_ERR_STATUS_HI_JPEG4D__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG4D__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG4D__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG4D__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG4D__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG4D__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG4D__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG4D__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG4D__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG4D__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG5S +#define VCN_UE_ERR_STATUS_LO_JPEG5S__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG5S__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG5S__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG5S__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG5S__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG5S__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG5S__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG5S__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG5S +#define VCN_UE_ERR_STATUS_HI_JPEG5S__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG5S__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG5S__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG5S__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG5S__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG5S__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG5S__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG5S__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG5S__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG5S__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG5D +#define VCN_UE_ERR_STATUS_LO_JPEG5D__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG5D__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG5D__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG5D__Memory_id__SHIFT 
0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG5D__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG5D__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG5D__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG5D__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG5D +#define VCN_UE_ERR_STATUS_HI_JPEG5D__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG5D__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG5D__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG5D__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG5D__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG5D__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG5D__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG5D__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG5D__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG5D__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG6S +#define VCN_UE_ERR_STATUS_LO_JPEG6S__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG6S__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG6S__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG6S__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG6S__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG6S__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG6S__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG6S__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG6S +#define VCN_UE_ERR_STATUS_HI_JPEG6S__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG6S__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG6S__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG6S__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG6S__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG6S__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG6S__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG6S__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG6S__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG6S__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG6D +#define VCN_UE_ERR_STATUS_LO_JPEG6D__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG6D__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG6D__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG6D__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG6D__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG6D__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG6D__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG6D__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG6D +#define VCN_UE_ERR_STATUS_HI_JPEG6D__ECC__SHIFT 0x0 +#define 
VCN_UE_ERR_STATUS_HI_JPEG6D__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG6D__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG6D__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG6D__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG6D__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG6D__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG6D__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG6D__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG6D__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG7S +#define VCN_UE_ERR_STATUS_LO_JPEG7S__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG7S__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG7S__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG7S__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG7S__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG7S__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG7S__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG7S__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG7S +#define VCN_UE_ERR_STATUS_HI_JPEG7S__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG7S__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG7S__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG7S__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG7S__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_clr__SHIFT 0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG7S__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG7S__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG7S__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG7S__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG7S__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_clr_MASK 0x80000000L +//VCN_UE_ERR_STATUS_LO_JPEG7D +#define VCN_UE_ERR_STATUS_LO_JPEG7D__Err_Status_Valid_Flag__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_LO_JPEG7D__Address_Valid_Flag__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_LO_JPEG7D__Address__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_LO_JPEG7D__Memory_id__SHIFT 0x18 +#define VCN_UE_ERR_STATUS_LO_JPEG7D__Err_Status_Valid_Flag_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_LO_JPEG7D__Address_Valid_Flag_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_LO_JPEG7D__Address_MASK 0x00FFFFFCL +#define VCN_UE_ERR_STATUS_LO_JPEG7D__Memory_id_MASK 0xFF000000L +//VCN_UE_ERR_STATUS_HI_JPEG7D +#define VCN_UE_ERR_STATUS_HI_JPEG7D__ECC__SHIFT 0x0 +#define VCN_UE_ERR_STATUS_HI_JPEG7D__Parity__SHIFT 0x1 +#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_Info_Valid_Flag__SHIFT 0x2 +#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_Info__SHIFT 0x3 +#define VCN_UE_ERR_STATUS_HI_JPEG7D__UE_Cnt__SHIFT 0x17 +#define VCN_UE_ERR_STATUS_HI_JPEG7D__FED_Cnt__SHIFT 0x1a +#define VCN_UE_ERR_STATUS_HI_JPEG7D__RESERVED__SHIFT 0x1d +#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_clr__SHIFT 
0x1f +#define VCN_UE_ERR_STATUS_HI_JPEG7D__ECC_MASK 0x00000001L +#define VCN_UE_ERR_STATUS_HI_JPEG7D__Parity_MASK 0x00000002L +#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_Info_Valid_Flag_MASK 0x00000004L +#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_Info_MASK 0x007FFFF8L +#define VCN_UE_ERR_STATUS_HI_JPEG7D__UE_Cnt_MASK 0x03800000L +#define VCN_UE_ERR_STATUS_HI_JPEG7D__FED_Cnt_MASK 0x1C000000L +#define VCN_UE_ERR_STATUS_HI_JPEG7D__RESERVED_MASK 0x60000000L +#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_clr_MASK 0x80000000L // addressBlock: aid_uvd0_uvd_jrbc0_uvd_jrbc_dec //UVD_JRBC0_UVD_JRBC_RB_WPTR From 5e1e227fb7cbea2624b4b9375a9b888d02fed4f5 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 1 Mar 2023 20:37:56 +0800 Subject: [PATCH 507/861] drm/amdgpu: Add query_ras_error_count for vcn v4_0_3 Add query_ras_error_count callback for vcn v4_0_3. It will be used to query and log vcn error count. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 36 +++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 9d0c3dc76547..e5037d6f884b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -1430,3 +1430,39 @@ const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = { .rev = 3, .funcs = &vcn_v4_0_3_ip_funcs, }; + +static const struct amdgpu_ras_err_status_reg_entry vcn_v4_0_3_ue_reg_list[] = { + {AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDD, regVCN_UE_ERR_STATUS_HI_VIDD), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDD"}, + {AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDV, regVCN_UE_ERR_STATUS_HI_VIDV), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDV"}, +}; + +static void vcn_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev, + uint32_t vcn_inst, + void *ras_err_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status; + + /* vcn v4_0_3 only support query uncorrectable errors */ + amdgpu_ras_inst_query_ras_error_count(adev, + vcn_v4_0_3_ue_reg_list, + ARRAY_SIZE(vcn_v4_0_3_ue_reg_list), + NULL, 0, GET_INST(VCN, vcn_inst), + AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, + &err_data->ue_count); +} + +static void vcn_v4_0_3_query_ras_error_count(struct amdgpu_device *adev, + void *ras_err_status) +{ + uint32_t i; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) { + dev_warn(adev->dev, "VCN RAS is not supported\n"); + return; + } + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + vcn_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status); +} From 6d39fa3fc802e511241898a6890a9b2ec7f958e3 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 2 Mar 2023 14:23:47 +0800 Subject: [PATCH 508/861] drm/amdgpu: Add reset_ras_error_count for vcn v4_0_3 Add reset_ras_error_count callback for vcn v4_0_3. It will be used to reset vcn ras error count. 
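
Taken together with the query callback from the previous patch, the
expected driving order from the RAS core can be sketched like this
(an illustration of the callback contract only, not code from the
patch):

    static void vcn_ras_poll(struct amdgpu_device *adev,
    			 const struct amdgpu_ras_block_hw_ops *hw_ops,
    			 struct ras_err_data *err_data)
    {
    	/* latch the current uncorrectable error counters... */
    	if (hw_ops->query_ras_error_count)
    		hw_ops->query_ras_error_count(adev, err_data);

    	/* ...then clear the hardware counters for the next interval */
    	if (hw_ops->reset_ras_error_count)
    		hw_ops->reset_ras_error_count(adev);
    }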
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index e5037d6f884b..e8933039bcd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -1466,3 +1466,25 @@ static void vcn_v4_0_3_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < adev->vcn.num_vcn_inst; i++) vcn_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status); } + +static void vcn_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev, + uint32_t vcn_inst) +{ + amdgpu_ras_inst_reset_ras_error_count(adev, + vcn_v4_0_3_ue_reg_list, + ARRAY_SIZE(vcn_v4_0_3_ue_reg_list), + GET_INST(VCN, vcn_inst)); +} + +static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) { + dev_warn(adev->dev, "VCN RAS is not supported\n"); + return; + } + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + vcn_v4_0_3_inst_reset_ras_error_count(adev, i); +} From c3f05ab8c40f8a5a8576e3d936cff450a39b0360 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 6 Mar 2023 11:00:11 +0800 Subject: [PATCH 509/861] drm/amdgpu: Initialize vcn v4_0_3 ras function Initialize vcn v4_0_3 ras function Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index e8933039bcd6..7558095ecf6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -51,6 +51,7 @@ static int vcn_v4_0_3_set_powergating_state(void *handle, static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, struct dpg_pause_state *new_state); static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring); +static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev); /** * vcn_v4_0_3_early_init - set function pointers @@ -68,6 +69,7 @@ static int vcn_v4_0_3_early_init(void *handle) vcn_v4_0_3_set_unified_ring_funcs(adev); vcn_v4_0_3_set_irq_funcs(adev); + vcn_v4_0_3_set_ras_funcs(adev); return amdgpu_vcn_early_init(adev); } @@ -130,6 +132,14 @@ static int vcn_v4_0_3_sw_init(void *handle) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode; + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) { + r = amdgpu_vcn_ras_sw_init(adev); + if (r) { + dev_err(adev->dev, "Failed to initialize vcn ras block!\n"); + return r; + } + } + return 0; } @@ -1488,3 +1498,19 @@ static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; i++) vcn_v4_0_3_inst_reset_ras_error_count(adev, i); } + +static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = { + .query_ras_error_count = vcn_v4_0_3_query_ras_error_count, + .reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count, +}; + +static struct amdgpu_vcn_ras vcn_v4_0_3_ras = { + .ras_block = { + .hw_ops = &vcn_v4_0_3_ras_hw_ops, + }, +}; + +static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev) +{ + adev->vcn.ras = &vcn_v4_0_3_ras; +} From 85f23b0a8ce31f1e22d7bfb4666b4a7830563347 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 2 Mar 2023 16:38:38 +0800 Subject: 
[PATCH 510/861] drm/amdgpu: Re-enable VCN RAS if DPG is enabled VCN RAS enablement sequence needs to be added in DPG HW init sequence. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 27 ++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 7558095ecf6a..c77ceaf53dcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -52,7 +52,8 @@ static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, struct dpg_pause_state *new_state); static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring); static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev); - +static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev, + int inst_idx, bool indirect); /** * vcn_v4_0_3_early_init - set function pointers * @@ -769,6 +770,8 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect); + vcn_v4_0_3_enable_ras(adev, inst_idx, indirect); + /* enable master interrupt */ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( VCN, 0, regUVD_MASTINT_EN), @@ -1514,3 +1517,25 @@ static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev) { adev->vcn.ras = &vcn_v4_0_3_ras; } + +static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev, + int inst_idx, bool indirect) +{ + uint32_t tmp; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + return; + + tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK | + VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK | + VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK | + VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, + SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL), + tmp, 0, indirect); + + tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, + SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN), + tmp, 0, indirect); +} From 41e491d8b606ea55b7234967f802cec8e6d77952 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 2 Mar 2023 17:56:59 +0800 Subject: [PATCH 511/861] drm/amdgpu: Add query_ras_error_count for jpeg v4_0_3 Add query_ras_error_count callback for jpeg v4_0_3. It will be used to query and log jpeg error count. 
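The count itself is extracted from the per-block STATUS_HI register by the generic helper. Taking the VCN_UE_ERR_STATUS_HI_JPEG7D masks from the register header shown earlier in this section as an example, the decode amounts to roughly the following (illustrative only; jpeg_inst is a placeholder, and the helper actually goes through REG_GET_FIELD()):

	uint32_t hi = RREG32_SOC15(JPEG, jpeg_inst, regVCN_UE_ERR_STATUS_HI_JPEG7D);

	/* UE_Cnt occupies bits [25:23], mask 0x03800000 */
	uint32_t ue_cnt = (hi & VCN_UE_ERR_STATUS_HI_JPEG7D__UE_Cnt_MASK) >> 23;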
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 64 ++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index ea9cb098a144..5dedba91fa32 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -960,3 +960,67 @@ const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = { .rev = 3, .funcs = &jpeg_v4_0_3_ip_funcs, }; + +static const struct amdgpu_ras_err_status_reg_entry jpeg_v4_0_3_ue_reg_list[] = { + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0S, regVCN_UE_ERR_STATUS_HI_JPEG0S), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0S"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0D, regVCN_UE_ERR_STATUS_HI_JPEG0D), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0D"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1S, regVCN_UE_ERR_STATUS_HI_JPEG1S), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1S"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1D, regVCN_UE_ERR_STATUS_HI_JPEG1D), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1D"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2S, regVCN_UE_ERR_STATUS_HI_JPEG2S), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2S"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2D, regVCN_UE_ERR_STATUS_HI_JPEG2D), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2D"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3S, regVCN_UE_ERR_STATUS_HI_JPEG3S), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3S"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3D, regVCN_UE_ERR_STATUS_HI_JPEG3D), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3D"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4S, regVCN_UE_ERR_STATUS_HI_JPEG4S), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4S"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4D, regVCN_UE_ERR_STATUS_HI_JPEG4D), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4D"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5S, regVCN_UE_ERR_STATUS_HI_JPEG5S), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5S"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5D, regVCN_UE_ERR_STATUS_HI_JPEG5D), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5D"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6S, regVCN_UE_ERR_STATUS_HI_JPEG6S), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6S"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6D, regVCN_UE_ERR_STATUS_HI_JPEG6D), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6D"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7S, regVCN_UE_ERR_STATUS_HI_JPEG7S), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7S"}, + {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7D, regVCN_UE_ERR_STATUS_HI_JPEG7D), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7D"}, +}; + +static void jpeg_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev, + uint32_t jpeg_inst, + void *ras_err_status) +{ + struct 
ras_err_data *err_data = (struct ras_err_data *)ras_err_status; + + /* jpeg v4_0_3 only supports uncorrectable errors */ + amdgpu_ras_inst_query_ras_error_count(adev, + jpeg_v4_0_3_ue_reg_list, + ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list), + NULL, 0, GET_INST(VCN, jpeg_inst), + AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, + &err_data->ue_count); +} + +static void jpeg_v4_0_3_query_ras_error_count(struct amdgpu_device *adev, + void *ras_err_status) +{ + uint32_t i; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) { + dev_warn(adev->dev, "JPEG RAS is not supported\n"); + return; + } + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) + jpeg_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status); +} From 570df4bca6187f493a1315a7373d7eb1285b3e86 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 2 Mar 2023 18:04:24 +0800 Subject: [PATCH 512/861] drm/amdgpu: Add reset_ras_error_count for jpeg v4_0_3 Add reset_ras_error_count callback for jpeg v4_0_3. It will be used to reset the JPEG RAS error count. Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 5dedba91fa32..21226d6d26f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -1024,3 +1024,25 @@ static void jpeg_v4_0_3_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) jpeg_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status); } + +static void jpeg_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev, + uint32_t jpeg_inst) +{ + amdgpu_ras_inst_reset_ras_error_count(adev, + jpeg_v4_0_3_ue_reg_list, + ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list), + GET_INST(VCN, jpeg_inst)); +} + +static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) { + dev_warn(adev->dev, "JPEG RAS is not supported\n"); + return; + } + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) + jpeg_v4_0_3_inst_reset_ras_error_count(adev, i); +} From 35d54e21e002198c13647b6cd8c77586f683cf39 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 6 Mar 2023 11:03:27 +0800 Subject: [PATCH 513/861] drm/amdgpu: Initialize jpeg v4_0_3 ras function Initialize the jpeg v4_0_3 ras function.
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 26 ++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 21226d6d26f8..ede15a3a4701 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -40,6 +40,7 @@ static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev); static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev); static int jpeg_v4_0_3_set_powergating_state(void *handle, enum amd_powergating_state state); +static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev); static int amdgpu_ih_srcid_jpeg[] = { VCN_4_0__SRCID__JPEG_DECODE, @@ -67,6 +68,7 @@ static int jpeg_v4_0_3_early_init(void *handle) jpeg_v4_0_3_set_dec_ring_funcs(adev); jpeg_v4_0_3_set_irq_funcs(adev); + jpeg_v4_0_3_set_ras_funcs(adev); return 0; } @@ -126,6 +128,14 @@ static int jpeg_v4_0_3_sw_init(void *handle) } } + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) { + r = amdgpu_jpeg_ras_sw_init(adev); + if (r) { + dev_err(adev->dev, "Failed to initialize jpeg ras block!\n"); + return r; + } + } + return 0; } @@ -1046,3 +1056,19 @@ static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev) for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) jpeg_v4_0_3_inst_reset_ras_error_count(adev, i); } + +static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = { + .query_ras_error_count = jpeg_v4_0_3_query_ras_error_count, + .reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count, +}; + +static struct amdgpu_jpeg_ras jpeg_v4_0_3_ras = { + .ras_block = { + .hw_ops = &jpeg_v4_0_3_ras_hw_ops, + }, +}; + +static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.ras = &jpeg_v4_0_3_ras; +} From 9b337b7d628a5e97b4dd72bb1d75f1716567b416 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 20 Mar 2023 17:51:30 +0800 Subject: [PATCH 514/861] drm/amdgpu: Adjust the sequence to query ras error info It turns out STATUS_VALID_FLAG needs to be checked ahead of any other fields. ADDRESS_VALID_FLAG and ERR_INFO_VALID_FLAG only gate the ADDRESS and ERR_INFO fields, respectively. The driver should continue to poll the ERR_CNT field even if ERR_INFO_VALID_FLAG is not set.
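The resulting decode order can be sketched as follows (field accessors as used by the helpers in amdgpu_ras.c; the lo/hi register values and surrounding code are trimmed for illustration):

	/* err_status_lo: STATUS_VALID_FLAG gates the whole record */
	if (!REG_GET_FIELD(lo, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
		return false;
	memory_id = REG_GET_FIELD(lo, ERR_STATUS_LO, MEMORY_ID);

	/* err_status_hi: ERR_INFO_VALID_FLAG gates only the ERR_INFO field */
	if (!REG_GET_FIELD(hi, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
		dev_dbg(adev->dev, "Invalid err_info field\n"); /* keep going */

	/* ERR_CNT is polled regardless */
	err_cnt = REG_GET_FIELD(hi, ERR_STATUS, ERR_CNT);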
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 5ae89602a116..64f80e8cbd63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -3164,7 +3164,8 @@ bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev, if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) && !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG)) - return false; + /* keep the check here in case we need to refer to the result later */ + dev_dbg(adev->dev, "Invalid err_info field\n"); /* read err count */ *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT); @@ -3187,17 +3188,17 @@ void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev, uint32_t i, j; for (i = 0; i < reg_list_size; i++) { + /* query memory_id from err_status_lo */ + if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i], + instance, &memory_id)) + continue; + /* query err_cnt from err_status_hi */ if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i], instance, &err_cnt) || !err_cnt) continue; - /* query memory_id from err_status_lo */ - if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i], - instance, &memory_id)) - continue; - *err_count += err_cnt; /* log the errors */ From 3697b9bd7c69910cb6543d8441211ecfb2f013ca Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 15 Mar 2023 14:04:33 -0400 Subject: [PATCH 515/861] drm/amdkfd: Increase queue number per process to 255 on GFX9.4.3 Increase the maximum number of queues that can be created per process to 255 on GFX 9.4.3. There is no HWS limitation restricting the number of queues that can be created. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index a3c23d07c7df..b100933340d2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -242,6 +242,13 @@ int pqm_create_queue(struct process_queue_manager *pqm, enum kfd_queue_type type = properties->type; unsigned int max_queues = 127; /* HWS limit */ + /* + * On GFX 9.4.3, increase the number of queues that + * can be created to 255. No HWS limit on GFX 9.4.3. + */ + if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) + max_queues = 255; + q = NULL; kq = NULL; From 1e91a5f79110b96baf7ad21d3c7b5c3e18cdf2a5 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 16 Mar 2023 14:59:27 +0530 Subject: [PATCH 516/861] drm/amdgpu: Fix register accesses in GFX v9.4.3 Access registers with the right xcc id.
Also, remove the unused logic as PG is not used in GFX v9.4.3 Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index d0ddcd751432..7ef2c9b515ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1004,16 +1004,6 @@ static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id) */ if (adev->gfx.rlc.is_rlc_v2_1) gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id); - - if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_GDS | - AMD_PG_SUPPORT_RLC_SMU_HS)) { - WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_JUMP_TABLE_RESTORE, - adev->gfx.rlc.cp_table_gpu_addr >> 8); - } } static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) @@ -1071,7 +1061,7 @@ static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id) /* wait for RLC_SAFE_MODE */ for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) + if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -1107,7 +1097,7 @@ static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev, gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id); for (k = 0; k < adev->usec_timeout; k++) { - if (RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SERDES_CU_MASTER_BUSY) == 0) + if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0) break; udelay(1); } @@ -1131,7 +1121,7 @@ static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev, RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; for (k = 0; k < adev->usec_timeout; k++) { - if ((RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) + if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) break; udelay(1); } From 00e1ab02c2ba31b2bd446979949193eb3ca2561c Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 24 Mar 2023 15:21:30 +0530 Subject: [PATCH 517/861] drm/amdgpu: Skip halting RLC on GFX v9.4.3 RLC-PMFW handshake happens periodically when GFXCLK DPM is enabled and halting RLC may cause unexpected results. Avoid halting RLC from driver side. 
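With this change, the CG disable and PG init run inside an RLC safe-mode bracket rather than around a full RLC stop/start; the resume path in the diff below reduces to roughly:

	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0); /* disable CG */
	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

The stop/start pair is kept only for the legacy (non-PSP) firmware loading path, where the RLC is not running yet.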
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 7ef2c9b515ef..6cde05421a10 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1256,21 +1256,20 @@ static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id) { int r; - gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id); - - /* disable CG */ - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0); - - gfx_v9_4_3_xcc_init_pg(adev, xcc_id); - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id); /* legacy rlc firmware loading */ r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id); if (r) return r; + gfx_v9_4_3_xcc_rlc_start(adev, xcc_id); } - gfx_v9_4_3_xcc_rlc_start(adev, xcc_id); + amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); + /* disable CG */ + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0); + gfx_v9_4_3_xcc_init_pg(adev, xcc_id); + amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); return 0; } @@ -1967,14 +1966,6 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id); gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id); - - /* Skip suspend with A+A reset */ - if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { - dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n"); - return; - } - - gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id); } static int gfx_v9_4_3_hw_init(void *handle) From 610dab118ff5013d46069c828b58d576e0907b66 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 31 Mar 2023 11:13:40 -0400 Subject: [PATCH 518/861] drm/amdkfd: Move pgmap to amdgpu_kfd_dev structure The VRAM pgmap resource is allocated every time compute partitions are switched, because kfd_dev is re-initialized by post_partition_switch. As a result, the memory region resource leaks and the system memory usage accounting becomes unbalanced. The pgmap resource should be allocated and registered only once when the driver is loaded, and freed when the driver is unloaded; move it from kfd_dev to amdgpu_kfd_dev.
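A minimal sketch of the corrected ownership, with the pgmap range/ops setup omitted (devm_memremap_pages() ties the mapping to the amdgpu device's lifetime, which is why re-running it on every partition switch leaked the previous region):

	int svm_migrate_init(struct amdgpu_device *adev)
	{
		/* the pagemap now lives in the long-lived amdgpu_kfd_dev ... */
		struct dev_pagemap *pgmap = &adev->kfd.pgmap;
		void *r;

		/* ... so this registration happens once per driver load */
		r = devm_memremap_pages(adev->dev, pgmap);
		return IS_ERR(r) ? PTR_ERR(r) : 0;
	}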
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4 ++++ drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 ++++---- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 +++--- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +- 6 files changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index d1d643a050a1..e4e1dbba060a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include "amdgpu_sync.h" @@ -101,6 +102,9 @@ struct amdgpu_kfd_dev { uint64_t vram_used_aligned; bool init_complete; struct work_struct reset_work; + + /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ + struct dev_pagemap pgmap; }; enum kgd_engine_type { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 42e599912e52..199d32c7c289 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -206,7 +206,7 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence) unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr) { - return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT; + return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT; } static void @@ -236,7 +236,7 @@ svm_migrate_addr(struct amdgpu_device *adev, struct page *page) unsigned long addr; addr = page_to_pfn(page) << PAGE_SHIFT; - return (addr - adev->kfd.dev->pgmap.range.start); + return (addr - adev->kfd.pgmap.range.start); } static struct page * @@ -990,14 +990,14 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = { int svm_migrate_init(struct amdgpu_device *adev) { - struct kfd_dev *kfddev = adev->kfd.dev; + struct amdgpu_kfd_dev *kfddev = &adev->kfd; struct dev_pagemap *pgmap; struct resource *res = NULL; unsigned long size; void *r; /* Page migration works on Vega10 or newer */ - if (!KFD_IS_SOC15(kfddev)) + if (!KFD_IS_SOC15(kfddev->dev)) return -EINVAL; pgmap = &kfddev->pgmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 02a90fd7f646..214d950f948e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -378,9 +378,6 @@ struct kfd_dev { int noretry; - /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ - struct dev_pagemap pgmap; - struct kfd_node *nodes[MAX_KFD_NODES]; unsigned int num_nodes; }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index cf354f9e4285..2b2129dd1e4a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -174,7 +174,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + bo_adev->vm_manager.vram_base_offset - - bo_adev->kfd.dev->pgmap.range.start; + bo_adev->kfd.pgmap.range.start; addr[i] |= SVM_RANGE_VRAM_DOMAIN; pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]); continue; @@ -2827,7 +2827,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, bool migration = false; int r = 0; - if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { + if (!KFD_IS_SVM_API_SUPPORTED(adev)) { pr_debug("device does not support SVM\n"); return -EFAULT; } @@ 
-3112,7 +3112,7 @@ int svm_range_list_init(struct kfd_process *p) spin_lock_init(&svms->deferred_list_lock); for (i = 0; i < p->n_pdds; i++) - if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->kfd)) + if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev)) bitmap_set(svms->bitmap_supported, i, 1); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 7515ddade3ae..021def496f5a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -200,8 +200,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s /* SVM API and HMM page migration work together, device memory type * is initialized to not 0 when page migration register device memory. */ -#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0 ||\ - (dev)->adev->gmc.is_app_apu) +#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\ + (adev)->gmc.is_app_apu) void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index d2a42b6b1fa8..6d6243b978e1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -2021,7 +2021,7 @@ int kfd_topology_add_device(struct kfd_node *gpu) dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ? HSA_CAP_RASEVENTNOTIFY : 0; - if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev)) + if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev)) dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; kfd_debug_print_topology(); From fc021438d0ab7863dc93f84a557af6dc6255b881 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Wed, 19 Apr 2023 17:43:26 -0400 Subject: [PATCH 519/861] drm/amdgpu: Enable NPS4 CPX mode CPX compute mode is a valid mode for the NPS4 memory partition mode. Signed-off-by: Philip Yang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 848049db00ab..97011e7e031d 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -281,9 +281,9 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, adev->gmc.num_mem_partitions == 4) && (num_xccs_per_xcp >= 2); case AMDGPU_CPX_PARTITION_MODE: - return (num_xcc > 1) && - (adev->gmc.num_mem_partitions == 1 || - adev->gmc.num_mem_partitions == num_xcc); + return ((num_xcc > 1) && + (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) && + (num_xcc % adev->gmc.num_mem_partitions) == 0); default: return false; } From f915f3af9984464c308787102990d85d4e988d2c Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Fri, 28 Apr 2023 14:20:00 -0400 Subject: [PATCH 520/861] drm/amdgpu: For GFX 9.4.3 APU fix vram_usage value For the GFX 9.4.3 APP APU, VRAM is allocated in the GTT domain.
While freeing memory, check for the GTT domain instead of VRAM if it is an APP APU. Signed-off-by: Harish Kasiviswanathan Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index c4b949d17e14..fd395cd61b54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1886,11 +1886,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } /* Update the size of the BO being freed if it was allocated from - * VRAM and is not imported. + * VRAM and is not imported. For APP APU VRAM allocations are done + * in GTT domain */ if (size) { - if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) && - (!is_imported)) + if (!is_imported && + (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || + (adev->gmc.is_app_apu && + mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) *size = bo_size; else *size = 0; From 2e8cc5d317d12f7fb4f66361a3ce5427f0abe2cd Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Wed, 8 Feb 2023 11:10:57 -0500 Subject: [PATCH 521/861] drm/amdgpu: Use legacy TLB flush for gfx943 Invalidate TLBs via a legacy flush request (flush_type=0) prior to the heavyweight flush requests (flush_type=2) in gmc_v9_0.c. This is temporarily required to mitigate a bug causing CPC UTCL1 to return stale translations after invalidation requests in address range mode. v2: squash in long term fix "drm/amdgpu: disable extra gfx943 legacy flush on rev1+" Signed-off-by: Graham Sider Reviewed-by: Philip Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 95c3253e240a..2eb67b53e497 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -833,6 +833,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, */ inv_req = gmc_v9_0_get_invalidate_req(vmid, 2); inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); + } else if (flush_type == 2 && + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && + adev->rev_id == 0) { + inv_req = gmc_v9_0_get_invalidate_req(vmid, 0); + inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); } else { inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); inv_req2 = 0; @@ -976,6 +981,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (vega20_xgmi_wa) kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); + + if (flush_type == 2 && + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && + adev->rev_id == 0) + kiq->pmf->kiq_invalidate_tlbs(ring, + pasid, 0, all_hub); + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); From 895797d9193b38e759bc01268a8e3887e521f682 Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Mon, 6 Feb 2023 14:04:42 -0500 Subject: [PATCH 522/861] drm/amdgpu/bu: Add use_mtype_cc_wa module param Set use_mtype_cc_wa to 1 by default, so that the PTE coherence flag is MTYPE_CC instead of MTYPE_RW. This is required for the time being to mitigate a bug causing XCCs to hit stale data due to TCC marking fully dirty lines as exclusive.
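Since the parameter is registered with mode 0444 it can only be set at module load time, for example (illustrative; later patches in this series rework this knob into mtype_local):

	# module option
	modprobe amdgpu use_mtype_cc_wa=0

	# or on the kernel command line
	amdgpu.use_mtype_cc_wa=0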
Signed-off-by: Graham Sider Reviewed-by: Joseph Greathouse Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 +++++++--- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index cb9373f8c25a..cd2a29a7e26d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -212,6 +212,7 @@ extern int amdgpu_noretry; extern int amdgpu_force_asic_type; extern int amdgpu_smartshift_bias; extern int amdgpu_use_xgmi_p2p; +extern bool amdgpu_use_mtype_cc_wa; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index da4e50aef95a..8bc37826a99f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -822,6 +822,13 @@ MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault ( module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444); #endif +/** + * DOC: use_mtype_cc_wa (bool) + */ +bool amdgpu_use_mtype_cc_wa = true; +MODULE_PARM_DESC(use_mtype_cc_wa, "Use MTYPE_CC workaround (0 = use MTYPE_RW where applicable, 1 = use MTYPE_CC where applicable (default))"); +module_param_named(use_mtype_cc_wa, amdgpu_use_mtype_cc_wa, bool, 0444); + /** * DOC: pcie_p2p (bool) * Enable PCIe P2P (requires large-BAR). Default value: true (on) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2eb67b53e497..8623b93c05ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1187,6 +1187,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; unsigned int mtype; + unsigned int mtype_default; bool snoop = false; switch (adev->ip_versions[GC_HWIP][0]) { @@ -1230,7 +1231,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, /* FIXME: Needs more work for handling multiple memory * partitions (> NPS1 mode) e.g. NPS4 for both APU and dGPU * modes. + * FIXME: Temporarily using MTYPE_CC instead of MTYPE_RW where applicable. + * To force use of MTYPE_RW, set use_mtype_cc_wa=0 */ + mtype_default = amdgpu_use_mtype_cc_wa ? MTYPE_CC : MTYPE_RW; snoop = true; if (uncached) { mtype = MTYPE_UC; @@ -1245,14 +1249,14 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, * socket should be treated as remote access so MTYPE_RW * cannot be used always. 
*/ - mtype = MTYPE_RW; + mtype = mtype_default; } else if (adev->flags & AMD_IS_APU) { /* APU on carve out mode */ - mtype = MTYPE_RW; + mtype = mtype_default; } else { /* dGPU */ if (is_vram && bo_adev == adev) - mtype = MTYPE_RW; + mtype = mtype_default; else if (is_vram) mtype = MTYPE_NC; else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 2b2129dd1e4a..477ef9294203 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1198,9 +1198,12 @@ svm_range_get_pte_flags(struct kfd_node *node, if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; } else if (domain == SVM_RANGE_VRAM_DOMAIN) { - /* local HBM region close to partition */ + /* local HBM region close to partition + * FIXME: Temporarily using MTYPE_CC instead of MTYPE_RW where applicable. + * To force use of MTYPE_RW, set use_mtype_cc_wa=0 + */ if (bo_node == node) - mapping_flags |= AMDGPU_VM_MTYPE_RW; + mapping_flags |= amdgpu_use_mtype_cc_wa ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; /* local HBM region far from partition or remote XGMI GPU */ else if (svm_nodes_in_same_hive(bo_node, node)) mapping_flags |= AMDGPU_VM_MTYPE_NC; From 1e4a00334add40f609162914af7a24bc92951008 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 21 Feb 2023 17:31:32 -0500 Subject: [PATCH 523/861] drm/amdgpu: Fix per-BO MTYPE selection for GFXv9.4.3 Treat system memory on NUMA systems as remote by default. Overriding with a more efficient MTYPE per page will be implemented in the next patch. No need for a special case for APP APUs. System memory is handled the same for carve-out and native mode. And VRAM doesn't exist in native mode. Signed-off-by: Felix Kuehling Reviewed-by: Philip Yang Reviewed-and-tested-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 40 +++++++++++---------------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 24 +++++++++------- 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 8623b93c05ee..cf976b5b7b63 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1186,9 +1186,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM; bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; - unsigned int mtype; - unsigned int mtype_default; + /* TODO: memory partitions struct amdgpu_vm *vm = mapping->bo_va->base.vm;*/ + unsigned int mtype_local, mtype; bool snoop = false; + bool is_local; switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 1): @@ -1228,35 +1229,26 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, } break; case IP_VERSION(9, 4, 3): - /* FIXME: Needs more work for handling multiple memory - * partitions (> NPS1 mode) e.g. NPS4 for both APU and dGPU - * modes. - * FIXME: Temporarily using MTYPE_CC instead of MTYPE_RW where applicable. - * To force use of MTYPE_RW, set use_mtype_cc_wa=0 + /* Only local VRAM BOs or system memory on non-NUMA APUs + * can be assumed to be local in their entirety. Choose + * MTYPE_NC as safe fallback for all system memory BOs on + * NUMA systems. Their MTYPE can be overridden per-page in + * gmc_v9_0_override_vm_pte_flags. */ - mtype_default = amdgpu_use_mtype_cc_wa ? MTYPE_CC : MTYPE_RW; + mtype_local = amdgpu_use_mtype_cc_wa ? 
MTYPE_CC : MTYPE_RW; + is_local = (!is_vram && (adev->flags & AMD_IS_APU) && + num_possible_nodes() <= 1) || + (is_vram && adev == bo_adev /* TODO: memory partitions && + bo->mem_id == vm->mem_id*/); snoop = true; if (uncached) { mtype = MTYPE_UC; - } else if (adev->gmc.is_app_apu) { - /* FIXME: APU in native mode, NPS1 single socket only - * - * For suporting NUMA partitioned APU e.g. in NPS4 mode, - * this need to look at the NUMA node on which the - * system memory allocation was done. - * - * Memory access by a different partition within same - * socket should be treated as remote access so MTYPE_RW - * cannot be used always. - */ - mtype = mtype_default; } else if (adev->flags & AMD_IS_APU) { - /* APU on carve out mode */ - mtype = mtype_default; + mtype = is_local ? mtype_local : MTYPE_NC; } else { /* dGPU */ - if (is_vram && bo_adev == adev) - mtype = mtype_default; + if (is_local) + mtype = mtype_local; else if (is_vram) mtype = MTYPE_NC; else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 477ef9294203..4eec75b28917 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1151,6 +1151,7 @@ svm_range_get_pte_flags(struct kfd_node *node, bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN); bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; + unsigned int mtype_local; if (domain == SVM_RANGE_VRAM_DOMAIN) bo_node = prange->svm_bo->node; @@ -1191,19 +1192,16 @@ svm_range_get_pte_flags(struct kfd_node *node, } break; case IP_VERSION(9, 4, 3): - //TODO: Need more work for handling multiple memory partitions - //e.g. NPS4. Current approch is only applicable without memory - //partitions. + mtype_local = amdgpu_use_mtype_cc_wa ? AMDGPU_VM_MTYPE_CC : + AMDGPU_VM_MTYPE_RW; snoop = true; if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; } else if (domain == SVM_RANGE_VRAM_DOMAIN) { - /* local HBM region close to partition - * FIXME: Temporarily using MTYPE_CC instead of MTYPE_RW where applicable. - * To force use of MTYPE_RW, set use_mtype_cc_wa=0 - */ - if (bo_node == node) - mapping_flags |= amdgpu_use_mtype_cc_wa ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; + /* local HBM region close to partition */ + if (bo_node->adev == node->adev /* TODO: memory partitions && + bo_node->mem_id == node->mem_id*/) + mapping_flags |= mtype_local; /* local HBM region far from partition or remote XGMI GPU */ else if (svm_nodes_in_same_hive(bo_node, node)) mapping_flags |= AMDGPU_VM_MTYPE_NC; @@ -1212,7 +1210,13 @@ svm_range_get_pte_flags(struct kfd_node *node, mapping_flags |= AMDGPU_VM_MTYPE_UC; /* system memory accessed by the APU */ } else if (node->adev->flags & AMD_IS_APU) { - mapping_flags |= AMDGPU_VM_MTYPE_NC; + /* On NUMA systems, locality is determined per-page + * in amdgpu_gmc_override_vm_pte_flags + */ + if (num_possible_nodes() <= 1) + mapping_flags |= mtype_local; + else + mapping_flags |= AMDGPU_VM_MTYPE_NC; /* system memory accessed by the dGPU */ } else { mapping_flags |= AMDGPU_VM_MTYPE_UC; From 352b919c1e5ff50c71d665395b27acbd1bf23a05 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 21 Feb 2023 17:44:18 -0500 Subject: [PATCH 524/861] drm/amdgpu: Override MTYPE per page on GFXv9.4.3 APUs On GFXv9.4.3 NUMA APUs, system memory locality must be determined per page to choose the correct MTYPE. This patch adds a GMC callback that can provide this per-page override and implements it for native mode. 
Carve-out mode is not yet supported and will use the safe default (remote) MTYPE for system memory. Signed-off-by: Felix Kuehling Reviewed-by: Philip Yang Reviewed-and-tested-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 7 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 22 ++++++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 64 +++++++++++++++++++++++ 3 files changed, 90 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 43357d699e6e..6794edd1d2d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -148,6 +148,10 @@ struct amdgpu_gmc_funcs { void (*get_vm_pte)(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping, uint64_t *flags); + /* override per-page pte flags */ + void (*override_vm_pte_flags)(struct amdgpu_device *dev, + struct amdgpu_vm *vm, + uint64_t addr, uint64_t *flags); /* get the amount of memory used by the vbios for pre-OS console */ unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); @@ -336,6 +340,9 @@ struct amdgpu_gmc { #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) #define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags)) +#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \ + (adev)->gmc.gmc_funcs->override_vm_pte_flags \ + ((adev), (vm), (addr), (pte_flags)) #define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev)) /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index bc5d126b600b..60b1da93b06d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -786,13 +786,14 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params, uint64_t pe, uint64_t addr, unsigned int count, uint32_t incr, uint64_t flags) - { + struct amdgpu_device *adev = params->adev; + if (level != AMDGPU_VM_PTB) { flags |= AMDGPU_PDE_PTE; - amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags); + amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags); - } else if (params->adev->asic_type >= CHIP_VEGA10 && + } else if (adev->asic_type >= CHIP_VEGA10 && !(flags & AMDGPU_PTE_VALID) && !(flags & AMDGPU_PTE_PRT)) { @@ -800,6 +801,21 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params, flags |= AMDGPU_PTE_EXECUTABLE; } + /* APUs mapping system memory may need different MTYPEs on different + * NUMA nodes. Only do this for contiguous ranges that can be assumed + * to be on the same NUMA node. 
+ */ + if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) && + adev->gmc.gmc_funcs->override_vm_pte_flags && + num_possible_nodes() > 1) { + if (!params->pages_addr) + amdgpu_gmc_override_vm_pte_flags(adev, params->vm, + addr, &flags); + else + dev_dbg(adev->dev, + "override_vm_pte_flags skipped: non-contiguous\n"); + } + params->vm->update_funcs->update(params, pt, pe, addr, count, incr, flags); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index cf976b5b7b63..c64a69f75da2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1297,6 +1297,69 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, mapping, flags); } +static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint64_t addr, uint64_t *flags) +{ + int local_node, nid; + + /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system + * memory can use more efficient MTYPEs. + */ + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) + return; + + /* Only direct-mapped memory allows us to determine the NUMA node from + * the DMA address. + */ + if (!adev->ram_is_direct_mapped) { + dev_dbg(adev->dev, "RAM is not direct mapped\n"); + return; + } + + /* Only override mappings with MTYPE_NC, which is the safe default for + * cacheable memory. + */ + if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) != + AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) { + dev_dbg(adev->dev, "MTYPE is not NC\n"); + return; + } + + /* TODO: memory partitions. mem_id is hard-coded to 0 for now. + * FIXME: Only supported on native mode for now. For carve-out, the + * NUMA affinity of the GPU/VM needs to come from the PCI info because + * memory partitions are not associated with different NUMA nodes. + */ + if (adev->gmc.is_app_apu) { + local_node = adev->gmc.mem_partitions[/*vm->mem_id*/0].numa.node; + } else { + dev_dbg(adev->dev, "Only native mode APU is supported.\n"); + return; + } + + /* Only handle real RAM. Mappings of PCIe resources don't have struct + * page or NUMA nodes. + */ + if (!page_is_ram(addr >> PAGE_SHIFT)) { + dev_dbg(adev->dev, "Page is not RAM.\n"); + return; + } + nid = pfn_to_nid(addr >> PAGE_SHIFT); + dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n", + /*vm->mem_id*/0, local_node, nid); + if (nid == local_node) { + unsigned int mtype_local = + amdgpu_use_mtype_cc_wa ? 
MTYPE_CC : MTYPE_RW; + uint64_t old_flags = *flags; + + *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | + AMDGPU_PTE_MTYPE_VG10(mtype_local); + dev_dbg(adev->dev, "flags updated from %llx to %llx\n", + old_flags, *flags); + } +} + static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); @@ -1368,6 +1431,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .map_mtype = gmc_v9_0_map_mtype, .get_vm_pde = gmc_v9_0_get_vm_pde, .get_vm_pte = gmc_v9_0_get_vm_pte, + .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags, .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, .query_mem_partition_mode = &gmc_v9_0_query_memory_partition, }; From 76eb9c95a409ea820b2e7c968c220e7a38f27d76 Mon Sep 17 00:00:00 2001 From: David Francis Date: Mon, 27 Feb 2023 10:33:11 -0500 Subject: [PATCH 525/861] drm/amdgpu/bu: add mtype_local as a module parameter Selects the MTYPE to be used for local memory, (0 = MTYPE_CC (default), 1 = MTYPE_NC, 2 = MTYPE_RW) v2: squash in build fix (Alex) Reviewed-by: Graham Sider Signed-off-by: David Francis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 19 ++++++++++++++++--- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3 +-- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index cd2a29a7e26d..c2feaf2fd070 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -212,7 +212,7 @@ extern int amdgpu_noretry; extern int amdgpu_force_asic_type; extern int amdgpu_smartshift_bias; extern int amdgpu_use_xgmi_p2p; -extern bool amdgpu_use_mtype_cc_wa; +extern int amdgpu_mtype_local; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8bc37826a99f..706ba4af062f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -823,11 +823,11 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm #endif /** - * DOC: use_mtype_cc_wa (bool) + * DOC: mtype_local (int) */ -bool amdgpu_use_mtype_cc_wa = true; -MODULE_PARM_DESC(use_mtype_cc_wa, "Use MTYPE_CC workaround (0 = use MTYPE_RW where applicable, 1 = use MTYPE_CC where applicable (default))"); -module_param_named(use_mtype_cc_wa, amdgpu_use_mtype_cc_wa, bool, 0444); +int amdgpu_mtype_local; +MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_CC (default), 1 = MTYPE_NC, 2 = MTYPE_RW)"); +module_param_named(mtype_local, amdgpu_mtype_local, int, 0444); /** * DOC: pcie_p2p (bool) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c64a69f75da2..5a1414300271 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1235,7 +1235,16 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, * NUMA systems. Their MTYPE can be overridden per-page in * gmc_v9_0_override_vm_pte_flags. */ - mtype_local = amdgpu_use_mtype_cc_wa ? 
MTYPE_CC : MTYPE_RW; + mtype_local = MTYPE_CC; + if (amdgpu_mtype_local == 1) { + DRM_INFO_ONCE("Using MTYPE_NC for local memory\n"); + mtype_local = MTYPE_NC; + } else if (amdgpu_mtype_local == 2) { + DRM_INFO_ONCE("Using MTYPE_RW for local memory\n"); + mtype_local = MTYPE_RW; + } else { + DRM_INFO_ONCE("Using MTYPE_CC for local memory\n"); + } is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || (is_vram && adev == bo_adev /* TODO: memory partitions && @@ -1349,9 +1358,13 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n", /*vm->mem_id*/0, local_node, nid); if (nid == local_node) { - unsigned int mtype_local = - amdgpu_use_mtype_cc_wa ? MTYPE_CC : MTYPE_RW; uint64_t old_flags = *flags; + unsigned int mtype_local = MTYPE_CC; + + if (amdgpu_mtype_local == 1) + mtype_local = MTYPE_NC; + else if (amdgpu_mtype_local == 2) + mtype_local = MTYPE_RW; *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(mtype_local); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 4eec75b28917..df0ed5677609 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1192,8 +1192,7 @@ svm_range_get_pte_flags(struct kfd_node *node, } break; case IP_VERSION(9, 4, 3): - mtype_local = amdgpu_use_mtype_cc_wa ? AMDGPU_VM_MTYPE_CC : - AMDGPU_VM_MTYPE_RW; + mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_CC); snoop = true; if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; From b9cbd51000ad3541351ca832b00600870ac08e5c Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Mon, 6 Mar 2023 17:56:44 -0500 Subject: [PATCH 526/861] drm/amdgpu/bu: update mtype_local parameter settings Update mtype_local module parameter to use MTYPE_RW by default. 0: MTYPE_RW (default) 1: MTYPE_NC 2: MTYPE_CC Signed-off-by: Graham Sider Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12 ++++++------ drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3 ++- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 706ba4af062f..aa466a9eb956 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -826,7 +826,7 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm * DOC: mtype_local (int) */ int amdgpu_mtype_local; -MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_CC (default), 1 = MTYPE_NC, 2 = MTYPE_RW)"); +MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)"); module_param_named(mtype_local, amdgpu_mtype_local, int, 0444); /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 5a1414300271..32eb4f4f5492 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1235,15 +1235,15 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, * NUMA systems. Their MTYPE can be overridden per-page in * gmc_v9_0_override_vm_pte_flags. 
*/ - mtype_local = MTYPE_RW; + mtype_local = MTYPE_CC; if (amdgpu_mtype_local == 1) { DRM_INFO_ONCE("Using MTYPE_NC for local memory\n"); mtype_local = MTYPE_NC; } else if (amdgpu_mtype_local == 2) { - DRM_INFO_ONCE("Using MTYPE_RW for local memory\n"); - mtype_local = MTYPE_RW; - } else { DRM_INFO_ONCE("Using MTYPE_CC for local memory\n"); + mtype_local = MTYPE_CC; + } else { + DRM_INFO_ONCE("Using MTYPE_RW for local memory\n"); } is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || @@ -1359,12 +1359,12 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, /*vm->mem_id*/0, local_node, nid); if (nid == local_node) { uint64_t old_flags = *flags; - unsigned int mtype_local = MTYPE_CC; + unsigned int mtype_local = MTYPE_RW; if (amdgpu_mtype_local == 1) mtype_local = MTYPE_NC; else if (amdgpu_mtype_local == 2) - mtype_local = MTYPE_RW; + mtype_local = MTYPE_CC; *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(mtype_local); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index df0ed5677609..e6348d4133fd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1192,7 +1192,8 @@ svm_range_get_pte_flags(struct kfd_node *node, } break; case IP_VERSION(9, 4, 3): - mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_CC); + mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : + (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW); snoop = true; if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; From 2c1c7ba457d4ecf475c0e220ac5359971355c6eb Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 15 Aug 2022 16:55:02 -0400 Subject: [PATCH 527/861] drm/amdgpu: support partition drm devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support partition drm devices on GC_HWIP IP_VERSION(9, 4, 3). This is a temporary solution and will be superseded.
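The user-visible effect is one additional render node per partition, each funneling its ioctls to the primary device. A hypothetical userspace view (node numbering depends on probe order and is only illustrative):

	#include <fcntl.h>

	/* renderD128 = primary device, renderD129.. = partitions (assumed) */
	int fd = open("/dev/dri/renderD129", O_RDWR | O_CLOEXEC);
	/* ioctls on fd are redirected to the primary device; a follow-up
	 * patch maps this minor back to its partition at open time */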
Signed-off-by: Christian König Signed-off-by: James Zhu Reviewed-and-tested-by: Philip Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 32 ++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 59 +++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 5 ++ 6 files changed, 99 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c2feaf2fd070..eb2fb968e3af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -107,6 +107,7 @@ #include "amdgpu_fdinfo.h" #include "amdgpu_mca.h" #include "amdgpu_ras.h" +#include "amdgpu_xcp.h" #define MAX_GPU_INSTANCE 64 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0c6f983fb2ad..4d9c535bcb0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -6065,6 +6065,7 @@ void amdgpu_device_halt(struct amdgpu_device *adev) struct pci_dev *pdev = adev->pdev; struct drm_device *ddev = adev_to_drm(adev); + amdgpu_xcp_dev_unplug(adev); drm_dev_unplug(ddev); amdgpu_irq_disable_all(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index aa466a9eb956..7300df2a342c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2185,6 +2185,10 @@ retry_init: goto err_pci; } + ret = amdgpu_xcp_dev_register(adev, ent); + if (ret) + goto err_pci; + /* * 1. don't init fbdev on hw without DCE * 2. don't init fbdev if there are no connectors @@ -2257,6 +2261,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); + amdgpu_xcp_dev_unplug(adev); drm_dev_unplug(dev); if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) { @@ -2840,6 +2845,33 @@ static const struct drm_driver amdgpu_kms_driver = { .patchlevel = KMS_DRIVER_PATCHLEVEL, }; +const struct drm_driver amdgpu_partition_driver = { + .driver_features = + DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ | + DRIVER_SYNCOBJ_TIMELINE, + .open = amdgpu_driver_open_kms, + .postclose = amdgpu_driver_postclose_kms, + .lastclose = amdgpu_driver_lastclose_kms, + .ioctls = amdgpu_ioctls_kms, + .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), + .dumb_create = amdgpu_mode_dumb_create, + .dumb_map_offset = amdgpu_mode_dumb_mmap, + .fops = &amdgpu_driver_kms_fops, + .release = &amdgpu_driver_release_kms, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = amdgpu_gem_prime_import, + .gem_prime_mmap = drm_gem_prime_mmap, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = KMS_DRIVER_MAJOR, + .minor = KMS_DRIVER_MINOR, + .patchlevel = KMS_DRIVER_PATCHLEVEL, +}; + static struct pci_error_handlers amdgpu_pci_err_handler = { .error_detected = amdgpu_pci_error_detected, .mmio_enabled = amdgpu_pci_mmio_enabled, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h index 8178323e4bef..5bc2cb661af7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h @@ -42,6 +42,8 @@ #define DRIVER_DESC "AMD GPU" #define DRIVER_DATE "20150101" +extern const struct drm_driver amdgpu_partition_driver; + long amdgpu_drm_ioctl(struct file 
*filp, unsigned int cmd, unsigned long arg); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index bca226cc4e0b..8b28b18e4291 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -22,6 +22,9 @@ */ #include "amdgpu.h" #include "amdgpu_xcp.h" +#include "amdgpu_drv.h" + +#include static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr, struct amdgpu_xcp_ip *xcp_ip, int xcp_state) @@ -217,6 +220,31 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) return mode; } +static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev) +{ + struct drm_device *p_ddev; + struct pci_dev *pdev; + struct drm_device *ddev; + int i; + + pdev = adev->pdev; + ddev = adev_to_drm(adev); + + for (i = 0; i < MAX_XCP; i++) { + p_ddev = drm_dev_alloc(&amdgpu_partition_driver, + &pci_upstream_bridge(pdev)->dev); + if (IS_ERR(p_ddev)) + return PTR_ERR(p_ddev); + + /* Redirect all IOCTLs to the primary device */ + p_ddev->render->dev = ddev; + p_ddev->vma_offset_manager = ddev->vma_offset_manager; + adev->xcp_mgr->xcp[i].ddev = p_ddev; + } + + return 0; +} + int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, int init_num_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs) @@ -242,7 +270,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, adev->xcp_mgr = xcp_mgr; - return 0; + return amdgpu_xcp_dev_alloc(adev); } int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, @@ -278,3 +306,32 @@ int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp, return 0; } + +int amdgpu_xcp_dev_register(struct amdgpu_device *adev, + const struct pci_device_id *ent) +{ + int i, ret; + + if (!adev->xcp_mgr) + return 0; + + for (i = 0; i < MAX_XCP; i++) { + ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data); + if (ret) + return ret; + } + + return 0; +} + +void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev) +{ + int i; + + if (!adev->xcp_mgr) + return; + + for (i = 0; i < MAX_XCP; i++) + drm_dev_unplug(adev->xcp_mgr->xcp[i].ddev); +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index e1319b887bf3..dad0b98d1ae7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -70,6 +70,7 @@ struct amdgpu_xcp { uint8_t id; uint8_t mem_id; bool valid; + struct drm_device *ddev; }; struct amdgpu_xcp_mgr { @@ -115,6 +116,10 @@ int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp, enum AMDGPU_XCP_IP_BLOCK ip, uint32_t *inst_mask); +int amdgpu_xcp_dev_register(struct amdgpu_device *adev, + const struct pci_device_id *ent); +void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev); + static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr) { if (!xcp_mgr) From be3800f57c3b0fb39dc732345279db76a50559a3 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 28 Feb 2023 14:16:38 -0500 Subject: [PATCH 528/861] drm/amdgpu: find partition ID when open device MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Find the partition ID when opening the device, based on the render device minor.
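The lookup reduces to matching the drm minor of the opened node against each partition's render node, roughly (condensed from amdgpu_xcp_open_device() in the diff below):

	for (i = 0; i < MAX_XCP; ++i) {
		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			fpriv->xcp_id = i; /* partition selected by the node */
			break;
		}
	}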
Signed-off-by: Christian König Signed-off-by: James Zhu Reviewed-and-tested-by: Philip Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 29 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 3 +++ 4 files changed, 38 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index eb2fb968e3af..bba8cfeff71f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -467,6 +467,8 @@ struct amdgpu_fpriv { struct mutex bo_list_lock; struct idr bo_list_handles; struct amdgpu_ctx_mgr ctx_mgr; + /** GPU partition selection */ + uint32_t xcp_id; }; int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 44997c7ee89d..879718598fa4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1223,6 +1223,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) goto out_suspend; } + r = amdgpu_xcp_open_device(adev, fpriv, file_priv); + if (r) + return r; + pasid = amdgpu_pasid_alloc(16); if (pasid < 0) { dev_warn(adev->dev, "No more PASIDs available!"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 8b28b18e4291..9b627a8b1d5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -335,3 +335,32 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev) drm_dev_unplug(adev->xcp_mgr->xcp[i].ddev); } +int amdgpu_xcp_open_device(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, + struct drm_file *file_priv) +{ + int i; + + if (!adev->xcp_mgr) + return 0; + + fpriv->xcp_id = ~0; + for (i = 0; i < MAX_XCP; ++i) { + if (!adev->xcp_mgr->xcp[i].ddev) + break; + + if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) { + if (adev->xcp_mgr->xcp[i].valid == FALSE) { + dev_err(adev->dev, "renderD%d partition %d not valid!", + file_priv->minor->index, i); + return -ENOENT; + } + dev_dbg(adev->dev, "renderD%d partition %d openned!", + file_priv->minor->index, i); + fpriv->xcp_id = i; + break; + } + } + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index dad0b98d1ae7..ad60520f952c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -119,6 +119,9 @@ int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp, int amdgpu_xcp_dev_register(struct amdgpu_device *adev, const struct pci_device_id *ent); void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev); +int amdgpu_xcp_open_device(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, + struct drm_file *file_priv); static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr) { From 797a0a142ca7f3b823ae1032983111c055bc50fb Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 15 Aug 2022 16:45:12 -0400 Subject: [PATCH 529/861] drm/amdgpu: add partition ID track in ring Keep track partition ID in ring. 
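The ring-to-partition assignment added below is a bitmask membership test: each partition owns an instance mask per IP block, and a ring lands in the first partition whose mask contains the ring's instance bit. A self-contained sketch with illustrative masks:

  #include <stdint.h>
  #include <stdio.h>

  #define MAX_XCP 8

  /* Illustrative per-partition instance masks for one IP block. */
  static const uint32_t xcp_inst_mask[MAX_XCP] = { 0x3, 0xc, 0x30, 0xc0 };

  /* First partition whose mask owns this engine instance, else -1. */
  static int ring_to_xcp(unsigned int inst_idx, int num_xcps)
  {
      uint32_t inst_mask = 1u << inst_idx;

      for (int xcp_id = 0; xcp_id < num_xcps; xcp_id++)
          if (xcp_inst_mask[xcp_id] & inst_mask)
              return xcp_id;
      return -1;
  }

  int main(void)
  {
      printf("instance 5 -> xcp %d\n", ring_to_xcp(5, 4)); /* 2 */
      return 0;
  }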
Signed-off-by: James Zhu Acked-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 + .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 41 +++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index d2b1a8854603..4a4c9f89c302 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -252,6 +252,7 @@ struct amdgpu_ring { uint32_t buf_mask; u32 idx; u32 xcc_id; + u32 xcp_id; u32 me; u32 pipe; u32 queue; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 97011e7e031d..c90ea34ef9ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -61,6 +61,47 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } +static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, + uint32_t inst_idx, struct amdgpu_ring *ring) +{ + int xcp_id; + enum AMDGPU_XCP_IP_BLOCK ip_blk; + uint32_t inst_mask; + + ring->xcp_id = ~0; + if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return; + + inst_mask = 1 << inst_idx; + + switch (ring->funcs->type) { + case AMDGPU_HW_IP_GFX: + case AMDGPU_RING_TYPE_COMPUTE: + case AMDGPU_RING_TYPE_KIQ: + ip_blk = AMDGPU_XCP_GFX; + break; + case AMDGPU_RING_TYPE_SDMA: + ip_blk = AMDGPU_XCP_SDMA; + break; + case AMDGPU_RING_TYPE_VCN_ENC: + case AMDGPU_RING_TYPE_VCN_JPEG: + ip_blk = AMDGPU_XCP_VCN; + if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + inst_mask = 1 << (inst_idx * 2); + break; + default: + DRM_ERROR("Not support ring type %d!", ring->funcs->type); + return; + } + + for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { + if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { + ring->xcp_id = xcp_id; + break; + } + } +} + static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst) From 0a9115fd952a5de27dc360dc0c4618b6a1846c58 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 15 Aug 2022 16:55:02 -0400 Subject: [PATCH 530/861] drm/amdgpu: update header to support partition scheduling Update header to support partition scheduling. 
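The new select_scheds and update_partition_sched_list hooks are optional, so the wrapper macros in this header guard every level of the pointer chain before calling through. A standalone model of that guard pattern (simplified types, same -ENOENT fallback):

  #include <errno.h>
  #include <stdio.h>

  struct xcp_funcs {
      int (*select_scheds)(int hw_ip);
  };

  struct xcp_mgr {
      struct xcp_funcs *funcs;
  };

  struct dev {
      struct xcp_mgr *xcp_mgr;
  };

  /* Call the hook only if every level of the chain exists. */
  #define dev_select_scheds(d, ip) \
      ((d)->xcp_mgr && (d)->xcp_mgr->funcs && \
       (d)->xcp_mgr->funcs->select_scheds ? \
       (d)->xcp_mgr->funcs->select_scheds(ip) : -ENOENT)

  static int pick(int hw_ip) { return hw_ip; }

  int main(void)
  {
      struct xcp_funcs f = { .select_scheds = pick };
      struct xcp_mgr m = { .funcs = &f };
      struct dev with = { .xcp_mgr = &m }, without = { 0 };

      /* 3 on the partitioned device, -ENOENT otherwise. */
      printf("%d %d\n", dev_select_scheds(&with, 3),
             dev_select_scheds(&without, 3));
      return 0;
  }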
Signed-off-by: James Zhu Acked-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index ad60520f952c..cca06d38b03d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -70,7 +70,9 @@ struct amdgpu_xcp { uint8_t id; uint8_t mem_id; bool valid; + atomic_t ref_cnt; struct drm_device *ddev; + struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; }; struct amdgpu_xcp_mgr { @@ -97,6 +99,10 @@ struct amdgpu_xcp_mgr_funcs { int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + int (*select_scheds)(struct amdgpu_device *adev, + u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds); + int (*update_partition_sched_list)(struct amdgpu_device *adev); }; int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); @@ -123,6 +129,15 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *file_priv); +#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \ + ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ + (adev)->xcp_mgr->funcs->select_scheds ? \ + (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT) +#define amdgpu_xcp_update_partition_sched_list(adev) \ + ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ + (adev)->xcp_mgr->funcs->update_partition_sched_list ? \ + (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0) + static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr) { if (!xcp_mgr) From d425c6f48b189f0a5a7c7d26980fd7a2114fb35d Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 15 Aug 2022 17:19:11 -0400 Subject: [PATCH 531/861] drm/amdgpu: add partition scheduler list update Add partition scheduler list update in late init and xcp partition mode switch. 
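The update is a full rebuild: clear every partition's scheduler table, then walk the ready rings and append each ring's scheduler to its partition's slot. A simplified standalone model (ring indices stand in for drm_gpu_scheduler pointers):

  #include <stdio.h>
  #include <string.h>

  #define MAX_XCP   8
  #define MAX_SCHED 8

  struct sched_table {
      int sched[MAX_SCHED];     /* ring indices stand in for schedulers */
      unsigned int num_scheds;
  };

  static struct sched_table xcp[MAX_XCP];

  /* Full rebuild, as done on late init and on partition mode switch. */
  static void sched_list_update(const int ring_xcp_id[], int num_rings)
  {
      memset(xcp, 0, sizeof(xcp));
      for (int r = 0; r < num_rings; r++) {
          struct sched_table *t = &xcp[ring_xcp_id[r]];

          t->sched[t->num_scheds++] = r;
      }
  }

  int main(void)
  {
      const int ring_xcp_id[] = { 0, 0, 1, 1, 2, 3 };

      sched_list_update(ring_xcp_id, 6);
      printf("xcp0 has %u scheds\n", xcp[0].num_scheds); /* 2 */
      return 0;
  }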
Signed-off-by: James Zhu Acked-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 2 + .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 67 ++++++++++++++++++- 3 files changed, 70 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4d9c535bcb0c..02ee79b7b56d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2474,6 +2474,8 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) } } + amdgpu_xcp_update_partition_sched_list(adev); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 9b627a8b1d5c..78fce5aab218 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -118,6 +118,7 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) { + struct amdgpu_device *adev = xcp_mgr->adev; struct amdgpu_xcp_ip ip; uint8_t mem_id; int i, j, ret; @@ -153,6 +154,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) } xcp_mgr->num_xcps = num_xcps; + amdgpu_xcp_update_partition_sched_list(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index c90ea34ef9ec..073ae95e6dd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -102,6 +102,70 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, } } +static void aqua_vanjaram_xcp_gpu_sched_update( + struct amdgpu_device *adev, + struct amdgpu_ring *ring, + unsigned int sel_xcp_id) +{ + unsigned int *num_gpu_sched; + + num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id] + .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds; + adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio] + .sched[(*num_gpu_sched)++] = &ring->sched; + DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name, + sel_xcp_id, ring->funcs->type, + ring->hw_prio, *num_gpu_sched); +} + +static int aqua_vanjaram_xcp_sched_list_update( + struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i; + + for (i = 0; i < MAX_XCP; i++) { + atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0); + memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched)); + } + + if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + ring = adev->rings[i]; + if (!ring || !ring->sched.ready) + continue; + + aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); + + /* VCN is shared by two partitions under CPX MODE */ + if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && + adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); + } + + return 0; +} + +static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || + ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring); + else + aqua_vanjaram_set_xcp_id(adev, ring->me, ring); + } + + return 
aqua_vanjaram_xcp_sched_list_update(adev); +} + static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst) @@ -483,7 +547,8 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, .query_partition_mode = &aqua_vanjaram_query_partition_mode, .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, - .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id + .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id, + .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) From c30e326e488ec43f6b0931f16ddba16a3370ed4a Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 15 Aug 2022 17:12:01 -0400 Subject: [PATCH 532/861] drm/amdgpu: keep amdgpu_ctx_mgr in ctx structure Keep amdgpu_ctx_mgr in ctx structure to track fpriv. v2: add missing fpriv declaration lost in rebase Signed-off-by: James Zhu Acked-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e1f642a3dc2f..ef279481d8fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -303,6 +303,7 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx, static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority, struct drm_file *filp, struct amdgpu_ctx *ctx) { + struct amdgpu_fpriv *fpriv = filp->driver_priv; u32 current_stable_pstate; int r; @@ -331,6 +332,7 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority, else ctx->stable_pstate = current_stable_pstate; + ctx->ctx_mgr = &(fpriv->ctx_mgr); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 0fa0e56daf67..f1e27b6e16f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -57,6 +57,7 @@ struct amdgpu_ctx { unsigned long ras_counter_ce; unsigned long ras_counter_ue; uint32_t stable_pstate; + struct amdgpu_ctx_mgr *ctx_mgr; }; struct amdgpu_ctx_mgr { From cd7d8400aa04ba989a87949cf4611b7e16af274f Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 15 Aug 2022 17:00:54 -0400 Subject: [PATCH 533/861] drm/amdgpu: add partition schedule for GC(9, 4, 3) Implement partition schedule for GC(9, 4, 3). 
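The selection policy is least-loaded: the first time a file needs a partition, it takes the one with the lowest context reference count. A standalone model of that loop (plain counters stand in for the atomic ref_cnt):

  #include <limits.h>
  #include <stdio.h>

  #define MAX_XCP 8

  static unsigned int ref_cnt[MAX_XCP]; /* stands in for the atomic ref_cnt */

  /* Pick the partition with the fewest active contexts and take a ref. */
  static int pick_least_loaded(int num_xcps)
  {
      unsigned int least = UINT_MAX;
      int pick = 0;

      for (int i = 0; i < num_xcps; i++) {
          if (ref_cnt[i] < least) {
              least = ref_cnt[i];
              pick = i;
          }
      }
      ref_cnt[pick]++;
      return pick;
  }

  int main(void)
  {
      ref_cnt[0] = 3; ref_cnt[1] = 1; ref_cnt[2] = 2; ref_cnt[3] = 1;
      printf("picked xcp %d\n", pick_least_loaded(4)); /* 1 */
      return 0;
  }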
Signed-off-by: James Zhu Acked-by: Lijo Lazar Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 073ae95e6dd6..4ca932a62ce6 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -166,6 +166,46 @@ static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) return aqua_vanjaram_xcp_sched_list_update(adev); } +int aqua_vanjaram_select_scheds( + struct amdgpu_device *adev, + u32 hw_ip, + u32 hw_prio, + struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, + struct drm_gpu_scheduler ***scheds) +{ + u32 sel_xcp_id; + int i; + + if (fpriv->xcp_id == ~0) { + u32 least_ref_cnt = ~0; + + fpriv->xcp_id = 0; + for (i = 0; i < adev->xcp_mgr->num_xcps; i++) { + u32 total_ref_cnt; + + total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt); + if (total_ref_cnt < least_ref_cnt) { + fpriv->xcp_id = i; + least_ref_cnt = total_ref_cnt; + } + } + } + sel_xcp_id = fpriv->xcp_id; + + if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) { + *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds; + *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched; + atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt); + DRM_DEBUG("Selected partition #%d", sel_xcp_id); + } else { + DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id); + return -ENOENT; + } + + return 0; +} + static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst) @@ -548,6 +588,7 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .query_partition_mode = &aqua_vanjaram_query_partition_mode, .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id, + .select_scheds = &aqua_vanjaram_select_scheds, .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list }; From 9a18292d41ae201333fc4203b3e7987ce5c1de6e Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 15 Aug 2022 17:15:02 -0400 Subject: [PATCH 534/861] drm/amdgpu: run partition schedule if it is supported Run partition schedule if it is supported during ctx init entity. 
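This relies on the earlier change that embedded the amdgpu_ctx_mgr in the fpriv: the owning fpriv, and with it the xcp_id chosen at open time, can be recovered from a context with container_of. A standalone illustration of that recovery:

  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
      ((type *)((char *)(ptr) - offsetof(type, member)))

  struct ctx_mgr { int dummy; };

  struct fpriv {
      unsigned int xcp_id;
      struct ctx_mgr ctx_mgr;   /* embedded, never a bare pointer */
  };

  int main(void)
  {
      struct fpriv fp = { .xcp_id = 2 };
      struct ctx_mgr *mgr = &fp.ctx_mgr;  /* what the ctx stores */
      struct fpriv *owner = container_of(mgr, struct fpriv, ctx_mgr);

      printf("xcp_id %u\n", owner->xcp_id); /* 2 */
      return 0;
  }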
Signed-off-by: James Zhu Acked-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index ef279481d8fb..333757486098 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -222,8 +222,19 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio); hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); - scheds = adev->gpu_sched[hw_ip][hw_prio].sched; - num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; + + if (!(adev)->xcp_mgr) { + scheds = adev->gpu_sched[hw_ip][hw_prio].sched; + num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; + } else { + struct amdgpu_fpriv *fpriv; + + fpriv = container_of(ctx->ctx_mgr, struct amdgpu_fpriv, ctx_mgr); + r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv, + &num_scheds, &scheds); + if (r) + goto cleanup_entity; + } /* disable load balance if the hw engine retains context among dependent jobs */ if (hw_ip == AMDGPU_HW_IP_VCN_ENC || From 3e7c6fe38724eab767033f9d26b496bc2e815157 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 15 Aug 2022 17:21:44 -0400 Subject: [PATCH 535/861] drm/amdgpu: update ref_cnt before ctx free Update ref_cnt before ctx free. Signed-off-by: James Zhu Acked-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 16 ++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 2 ++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 333757486098..410acdd4554c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -266,7 +266,8 @@ error_free_entity: return r; } -static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) +static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev, + struct amdgpu_ctx_entity *entity) { ktime_t res = ns_to_ktime(0); int i; @@ -279,6 +280,8 @@ static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) dma_fence_put(entity->fences[i]); } + amdgpu_xcp_release_sched(adev, entity); + kfree(entity); return res; } @@ -412,7 +415,7 @@ static void amdgpu_ctx_fini(struct kref *ref) for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) { ktime_t spend; - spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]); + spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]); atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 78fce5aab218..9b960ba0b7ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -366,3 +366,19 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, return 0; } +void amdgpu_xcp_release_sched(struct amdgpu_device *adev, + struct amdgpu_ctx_entity *entity) +{ + struct drm_gpu_scheduler *sched; + struct amdgpu_ring *ring; + + if (!adev->xcp_mgr) + return; + + sched = entity->entity.rq->sched; + if (sched->ready) { + ring = to_amdgpu_ring(entity->entity.rq->sched); + atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt); + } +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index cca06d38b03d..39aca87ce204 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -128,6 +128,8 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev); int amdgpu_xcp_open_device(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *file_priv); +void amdgpu_xcp_release_sched(struct amdgpu_device *adev, + struct amdgpu_ctx_entity *entity); #define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \ ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ From d26ea1b346e71c07aa00956c32fe2d2dbec068ec Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 3 Mar 2023 19:45:45 -0500 Subject: [PATCH 536/861] drm/amdgpu: Add xcp manager num_xcp_per_mem_partition Used by KFD to check memory limit accounting. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 3 +++ 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 9b960ba0b7ac..f2981d21d4e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -156,6 +156,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) xcp_mgr->num_xcps = num_xcps; amdgpu_xcp_update_partition_sched_list(adev); + xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 39aca87ce204..68b63b970ce8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -83,6 +83,9 @@ struct amdgpu_xcp_mgr { struct amdgpu_xcp xcp[MAX_XCP]; uint8_t num_xcps; int8_t mode; + + /* Used to determine KFD memory size limits per XCP */ + unsigned int num_xcp_per_mem_partition; }; struct amdgpu_xcp_mgr_funcs { From a476c0c645535cc0361938becb440b4239996079 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 23 Feb 2023 11:03:37 -0500 Subject: [PATCH 537/861] drm/amdkfd: Store drm node minor number for kfd nodes From the KFD topology, the application will find the KFD node with the corresponding drm device node minor number; for example, if the partition drm nodes start from /dev/dri/renderD129, then KFD node 0 will store drm node minor number 129. The application will open drm node /dev/dri/renderD129 to create the amdgpu vm for KFD node 0 with the correct vm->mem_id to indicate the memory partition.
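From userspace the property is consumed directly: read drm_render_minor for a KFD node, then open the matching render node. A minimal sketch (the minor value 129 is just the example above):

  #include <fcntl.h>
  #include <stdio.h>

  /* Open the render node backing a KFD topology node, given the
   * drm_render_minor property read from that node. */
  static int open_render_node(int drm_render_minor)
  {
      char path[64];

      snprintf(path, sizeof(path), "/dev/dri/renderD%d", drm_render_minor);
      return open(path, O_RDWR | O_CLOEXEC);
  }

  int main(void)
  {
      int fd = open_render_node(129); /* partition 0 in the example above */

      if (fd < 0)
          perror("open");
      return 0;
  }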
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 6d6243b978e1..a8e25aecf839 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1942,8 +1942,12 @@ int kfd_topology_add_device(struct kfd_node *gpu) amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev); dev->node_props.max_engine_clk_ccompute = cpufreq_quick_get_max(0) / 1000; - dev->node_props.drm_render_minor = - gpu->kfd->shared_resources.drm_render_minor; + + if (gpu->xcp) + dev->node_props.drm_render_minor = gpu->xcp->ddev->render->index; + else + dev->node_props.drm_render_minor = + gpu->kfd->shared_resources.drm_render_minor; dev->node_props.hive_id = gpu->kfd->hive_id; dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); From 934deb64fdf220d2caf978d22615bcc7c9f6897e Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 23 Feb 2023 19:58:22 -0500 Subject: [PATCH 538/861] drm/amdgpu: Add memory partition id to amdgpu_vm If xcp_mgr is initialized, add mem_id to amdgpu_vm structure to store memory partition number when creating amdgpu_vm for the xcp. The xcp number is decided when opening the render device, for example /dev/dri/renderD129 is xcp_id 0, /dev/dri/renderD130 is xcp_id 1. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 3 +++ 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 879718598fa4..815098be4c2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1223,10 +1223,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) goto out_suspend; } - r = amdgpu_xcp_open_device(adev, fpriv, file_priv); - if (r) - return r; - pasid = amdgpu_pasid_alloc(16); if (pasid < 0) { dev_warn(adev->dev, "No more PASIDs available!"); @@ -1237,6 +1233,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (r) goto error_pasid; + r = amdgpu_xcp_open_device(adev, fpriv, file_priv); + if (r) + goto error_vm; + r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid); if (r) goto error_vm; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 8add5f5eb92a..14f9a2bf3acb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -329,6 +329,9 @@ struct amdgpu_vm { struct ttm_lru_bulk_move lru_bulk_move; /* Flag to indicate if VM is used for compute */ bool is_compute_context; + + /* Memory partition number, -1 means any partition */ + int8_t mem_id; }; struct amdgpu_vm_manager { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index f2981d21d4e0..610c32c4f5af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -364,6 +364,9 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, break; } } + + fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? 
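A standalone model of the open-time mapping (illustrative mem_id table; ~0 is the "no partition selected" sentinel used by the previous patches):

  #include <stdio.h>

  #define MAX_XCP 8

  /* Illustrative table: two XCPs sharing each memory partition. */
  static const int xcp_mem_id[MAX_XCP] = { 0, 0, 1, 1 };

  /* ~0 means no partition was selected at open; the VM then gets
   * mem_id -1 and may be served from any memory partition. */
  static int vm_mem_id(unsigned int xcp_id)
  {
      return xcp_id == ~0u ? -1 : xcp_mem_id[xcp_id];
  }

  int main(void)
  {
      printf("%d %d\n", vm_mem_id(2), vm_mem_id(~0u)); /* 1 -1 */
      return 0;
  }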
-1 : + adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id; return 0; } From 4c6ce75fdd628c43aea11448ed41b52119dae42b Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 26 Jan 2023 18:11:29 -0500 Subject: [PATCH 539/861] drm/amdkfd: Show KFD node memory partition info Show KFD node memory partition id and size, add helper function KFD_XCP_MEMORY_SIZE to get kfd node memory size, will be used later to support memory accounting per partition. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 5 +++++ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 ++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index e4e1dbba060a..324cb566ca2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -330,6 +330,11 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag); +#define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ + (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ + (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ + (n)->adev->gmc.real_vram_size) + #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index b5497d2ee984..db5b53fcdf11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -724,7 +724,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); - /* TODO: Needs to be updated for memory partitioning */ svm_migrate_init(kfd->adev); amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); @@ -754,6 +753,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1; } + if (node->xcp) { + dev_info(kfd_device, "KFD node %d partition %d size %lldM\n", + node->node_id, node->xcp->mem_id, + KFD_XCP_MEMORY_SIZE(node) >> 20); + } + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { From f24e924b7e8aba7b62671e7e1a19d83301a08597 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 26 Jan 2023 18:25:28 -0500 Subject: [PATCH 540/861] drm/amdgpu: Add memory partition mem_id to amdgpu_bo Add mem_id_plus1 parameter to amdgpu_gem_object_create and pass it to amdgpu_bo_create. For dGPU mode allocation, mem_id is used by VRAM manager to get the memory partition fpfn, lpfn from xcp manager. For APU native mode allocation, mem_id is used to get NUMA node id from xcp manager, then pass to TTM as numa pool id to alloc memory from the specific NUMA node. mem_id -1 means for entire VRAM or any NUMA nodes. 
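The plus-1 encoding exists so that a zero-initialized parameter block decodes to -1, meaning any partition, without every caller having to set the field. A tiny standalone demonstration:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  struct bo_param {
      /* memory partition number plus 1, so all-zero means "any" */
      int8_t mem_id_plus1;
  };

  int main(void)
  {
      struct bo_param bp;

      memset(&bp, 0, sizeof(bp));          /* callers zero the param block */
      printf("%d\n", bp.mem_id_plus1 - 1); /* -1: any partition/NUMA node */

      bp.mem_id_plus1 = 1 + 1;             /* explicitly request mem_id 1 */
      printf("%d\n", bp.mem_id_plus1 - 1); /* 1 */
      return 0;
  }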
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 9 +++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 5 +++++ 6 files changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index fd395cd61b54..71f1e1990925 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -290,7 +290,7 @@ create_dmamap_sg_bo(struct amdgpu_device *adev, ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1, AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags, - ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj); + ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0); amdgpu_bo_unreserve(mem->bo); @@ -1721,7 +1721,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain)); ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags, - bo_type, NULL, &gobj); + bo_type, NULL, &gobj, 0); if (ret) { pr_debug("Failed to create BO on domain %s. ret %d\n", domain_string(alloc_domain), ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 6dceaf40625b..12210598e5b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -336,7 +336,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_CPU, flags, - ttm_bo_type_sg, resv, &gobj); + ttm_bo_type_sg, resv, &gobj, 0); if (ret) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 863cb668e000..33ebee18b80d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -98,7 +98,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj) + struct drm_gem_object **obj, int8_t mem_id_plus1) { struct amdgpu_bo *bo; struct amdgpu_bo_user *ubo; @@ -116,6 +116,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bp.flags = flags; bp.domain = initial_domain; bp.bo_ptr_size = sizeof(struct amdgpu_bo); + bp.mem_id_plus1 = mem_id_plus1; r = amdgpu_bo_create_user(adev, &bp, &ubo); if (r) @@ -336,7 +337,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, retry: r = amdgpu_gem_object_create(adev, size, args->in.alignment, initial_domain, - flags, ttm_bo_type_device, resv, &gobj); + flags, ttm_bo_type_device, resv, &gobj, 0); if (r && r != -ERESTARTSYS) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; @@ -405,7 +406,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU, - 0, ttm_bo_type_device, NULL, &gobj); + 0, ttm_bo_type_device, NULL, &gobj, 0); if (r) return r; @@ -931,7 +932,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, domain = 
amdgpu_bo_get_preferred_domain(adev, amdgpu_display_supported_domains(adev, flags)); r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags, - ttm_bo_type_device, NULL, &gobj); + ttm_bo_type_device, NULL, &gobj, 0); if (r) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 637bf51dbf06..646c4fcc8e40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -43,8 +43,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj); - + struct drm_gem_object **obj, int8_t mem_id_plus1); int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 725530eb4e0c..628632efabc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -574,6 +574,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bo->flags = bp->flags; + /* bo->mem_id -1 means any partition */ + bo->mem_id = bp->mem_id_plus1 - 1; + if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 35b8106816a1..eb24a66ccee5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -56,6 +56,8 @@ struct amdgpu_bo_param { bool no_wait_gpu; struct dma_resv *resv; void (*destroy)(struct ttm_buffer_object *bo); + /* memory partition number plus 1, 0 means any partition */ + int8_t mem_id_plus1; }; /* bo virtual addresses in a vm */ @@ -108,6 +110,9 @@ struct amdgpu_bo { struct mmu_interval_notifier notifier; #endif struct kgd_mem *kfd_bo; + + /* memory partition number, -1 means any partition */ + int8_t mem_id; }; struct amdgpu_bo_user { From 53c5692e7a3c8e8eed3ec6b876a3c982d217a5d7 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 26 Jan 2023 18:50:09 -0500 Subject: [PATCH 541/861] drm/amdkfd: Alloc memory of GPU support memory partition For dGPU mode VRAM allocation, create amdgpu_bo from amdgpu_vm->mem_id, to alloc from the correct memory range. For APU mode VRAM allocation, set alloc domain to GTT, and set bp->mem_id_plus1 from amdgpu_vm->mem_id + 1 to create amdgpu_bo, to allocate system memory from correct NUMA node. For GTT allocation, use mem_id -1 to allocate system memory from any NUMA nodes. Remove amdgpu_ttm_tt_set_mem_pool, to avoid the confusion that memory maybe allocated from different mem_id. 
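The resulting policy, summarized as a standalone sketch (names illustrative; "VRAM" on an APU is really partitioned, NUMA-local system memory):

  #include <stdio.h>

  enum domain { DOMAIN_VRAM, DOMAIN_GTT };

  struct alloc_policy {
      enum domain domain;
      int mem_id; /* -1: any partition / any NUMA node */
  };

  static struct alloc_policy pick_policy(int wants_vram, int is_app_apu,
                                         int vm_mem_id)
  {
      struct alloc_policy p = { DOMAIN_GTT, -1 }; /* plain GTT case */

      if (wants_vram) {
          p.domain = is_app_apu ? DOMAIN_GTT : DOMAIN_VRAM;
          p.mem_id = vm_mem_id; /* pin to the VM's partition */
      }
      return p;
  }

  int main(void)
  {
      struct alloc_policy p = pick_policy(1, 1, 2);

      printf("domain=%d mem_id=%d\n", p.domain, p.mem_id); /* 1 2 */
      return 0;
  }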
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 24 ++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 20 +--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 - 3 files changed, 8 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 71f1e1990925..c234dc0db799 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1641,9 +1641,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; uint64_t aligned_size; + int8_t mem_id = -1; u64 alloc_flags; int ret; - int mem_id = 0; /* Fixme : to be changed when mem_id support patch lands, until then NPS1, SPX only */ /* * Check on which domain to allocate BO @@ -1653,13 +1653,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( if (adev->gmc.is_app_apu) { domain = AMDGPU_GEM_DOMAIN_GTT; - alloc_domain = AMDGPU_GEM_DOMAIN_CPU; + alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; } else { alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; } + mem_id = avm->mem_id; } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1717,11 +1718,12 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( goto err_reserve_limit; } - pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", - va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain)); + pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s mem_id %d\n", + va, (*mem)->aql_queue ? size << 1 : size, + domain_string(alloc_domain), mem_id); ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags, - bo_type, NULL, &gobj, 0); + bo_type, NULL, &gobj, mem_id + 1); if (ret) { pr_debug("Failed to create BO on domain %s. 
ret %d\n", domain_string(alloc_domain), ret); @@ -1747,17 +1749,6 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( (*mem)->mapped_to_gpu_memory = 0; (*mem)->process_info = avm->process_info; - if (adev->gmc.is_app_apu && - ((*mem)->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) { - bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; - bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; - ret = amdgpu_ttm_tt_set_mem_pool(&bo->tbo, mem_id); - if (ret) { - pr_debug("failed to set ttm mem pool %d\n", ret); - goto err_set_mem_partition; - } - } - add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); if (user_addr) { @@ -1784,7 +1775,6 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( allocate_init_user_pages_failed: err_pin_bo: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); -err_set_mem_partition: drm_vma_node_revoke(&gobj->vma_node, drm_priv); err_node_allow: /* Don't unreserve system mem limit twice */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 09d1a98bd11e..129c593cb2bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1060,7 +1060,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return NULL; } gtt->gobj = &bo->base; - gtt->pool_id = NUMA_NO_NODE; + gtt->pool_id = abo->mem_id; if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) caching = ttm_write_combined; @@ -1155,24 +1155,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, return ttm_pool_free(pool, ttm); } -/** - * amdgpu_ttm_tt_set_mem_pool - Set the TTM memory pool for the TTM BO - * @tbo: The ttm_buffer_object that backs the VRAM bo - * @mem_id: to select the initialized ttm pool corresponding to the memory partition - */ -int amdgpu_ttm_tt_set_mem_pool(struct ttm_buffer_object *tbo, int mem_id) -{ - struct ttm_tt *ttm = tbo->ttm; - struct amdgpu_ttm_tt *gtt; - - if (!ttm && !ttm_tt_is_populated(ttm)) - return -EINVAL; - - gtt = ttm_to_amdgpu_ttm_tt(ttm); - gtt->pool_id = mem_id; - return 0; -} - /** * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current * task diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index fe32de1bf4d5..8ef048a0a33e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -192,7 +192,6 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, unsigned long end, unsigned long *userptr); -int amdgpu_ttm_tt_set_mem_pool(struct ttm_buffer_object *tbo, int mem_id); bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated); bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm); From 2046ed6c8aa951e4ae83c5022bb0a7c777386097 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 26 Jan 2023 18:45:32 -0500 Subject: [PATCH 542/861] drm/amdkfd: SVM range allocation support memory partition Pass kfd node->xcp->mem_id to amdgpu bo create parameter mem_id_plus1 to allocate new svm_bo on the specified memory partition. This is only for dGPU mode as we don't migrate with APU mode. 
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index e6348d4133fd..62aa7fb2eaa5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -555,16 +555,20 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE; bp.type = ttm_bo_type_device; bp.resv = NULL; + if (node->xcp) + bp.mem_id_plus1 = node->xcp->mem_id + 1; - /* TODO: Allocate memory from the right memory partition. We can sort - * out the details later, once basic memory partitioning is working - */ r = amdgpu_bo_create_user(node->adev, &bp, &ubo); if (r) { pr_debug("failed %d to create bo\n", r); goto create_bo_failed; } bo = &ubo->bo; + + pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n", + bo->tbo.resource->start << PAGE_SHIFT, bp.size, + bp.mem_id_plus1 - 1); + r = amdgpu_bo_reserve(bo, true); if (r) { pr_debug("failed %d to reserve bo\n", r); From 7f6db89418f9d26eb68e050ff16de8e9827011ca Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 26 Jan 2023 18:54:29 -0500 Subject: [PATCH 543/861] drm/amdgpu: dGPU mode placement support memory partition dGPU mode uses VRAM manager to validate bo, amdgpu bo placement use the mem_id to get the allocation range first, last page frame number from xcp manager, pass to drm buddy allocator as the allowed range. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 628632efabc8..85ad355815fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -132,13 +132,18 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_VRAM) { unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; - places[c].fpfn = 0; - places[c].lpfn = 0; + if (adev->gmc.mem_partitions && abo->mem_id >= 0) { + places[c].fpfn = adev->gmc.mem_partitions[abo->mem_id].range.fpfn; + places[c].lpfn = adev->gmc.mem_partitions[abo->mem_id].range.lpfn; + } else { + places[c].fpfn = 0; + places[c].lpfn = 0; + } places[c].mem_type = TTM_PL_VRAM; places[c].flags = 0; if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - places[c].lpfn = visible_pfn; + places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn); else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size) places[c].flags |= TTM_PL_FLAG_TOPDOWN; From dc12f9eddedb8b41f4dc948e5e636e5221fb4d43 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 2 Feb 2023 11:07:53 -0500 Subject: [PATCH 544/861] drm/amdkfd: Update MTYPE for far memory partition Use MTYPE RW/MTYPE_CC for mapping system memory or VRAM to KFD node within the same memory partition, use MTYPE_NC for mapping on KFD node from the far memory partition of the same socket or from another socket on same XGMI hive. On NPS4 or 4P system, MTYPE will be overridden per page depending on the memory NUMA node id and vm->mem_id. 
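The locality rule reduces to: same device and same memory partition gets the cacheable local MTYPE, everything farther gets MTYPE_NC. A simplified standalone model (ignoring the uncached and coherent overrides and the per-page NPS4 case):

  #include <stdio.h>

  enum mtype { MTYPE_LOCAL, MTYPE_NC }; /* MTYPE_LOCAL: RW or CC */

  static enum mtype pick_mtype(int same_adev, int bo_mem_id, int vm_mem_id)
  {
      if (same_adev && bo_mem_id == vm_mem_id)
          return MTYPE_LOCAL;
      return MTYPE_NC; /* far partition, other socket, or XGMI peer */
  }

  int main(void)
  {
      printf("%d %d\n", pick_mtype(1, 0, 0), pick_mtype(1, 0, 1)); /* 0 1 */
      return 0;
  }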
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 +++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 9 +++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 32eb4f4f5492..263d17a8b433 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1186,7 +1186,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM; bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; - /* TODO: memory partitions struct amdgpu_vm *vm = mapping->bo_va->base.vm;*/ + struct amdgpu_vm *vm = mapping->bo_va->base.vm; unsigned int mtype_local, mtype; bool snoop = false; bool is_local; @@ -1247,8 +1247,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, } is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || - (is_vram && adev == bo_adev /* TODO: memory partitions && - bo->mem_id == vm->mem_id*/); + (is_vram && adev == bo_adev && + bo->mem_id == vm->mem_id); snoop = true; if (uncached) { mtype = MTYPE_UC; @@ -1335,13 +1335,12 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, return; } - /* TODO: memory partitions. mem_id is hard-coded to 0 for now. - * FIXME: Only supported on native mode for now. For carve-out, the + /* FIXME: Only supported on native mode for now. For carve-out, the * NUMA affinity of the GPU/VM needs to come from the PCI info because * memory partitions are not associated with different NUMA nodes. */ - if (adev->gmc.is_app_apu) { - local_node = adev->gmc.mem_partitions[/*vm->mem_id*/0].numa.node; + if (adev->gmc.is_app_apu && vm->mem_id >= 0) { + local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node; } else { dev_dbg(adev->dev, "Only native mode APU is supported.\n"); return; @@ -1356,7 +1355,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, } nid = pfn_to_nid(addr >> PAGE_SHIFT); dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n", - /*vm->mem_id*/0, local_node, nid); + vm->mem_id, local_node, nid); if (nid == local_node) { uint64_t old_flags = *flags; unsigned int mtype_local = MTYPE_RW; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 62aa7fb2eaa5..a700d9ccd054 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1203,8 +1203,8 @@ svm_range_get_pte_flags(struct kfd_node *node, mapping_flags |= AMDGPU_VM_MTYPE_UC; } else if (domain == SVM_RANGE_VRAM_DOMAIN) { /* local HBM region close to partition */ - if (bo_node->adev == node->adev /* TODO: memory partitions && - bo_node->mem_id == node->mem_id*/) + if (bo_node->adev == node->adev && + (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id)) mapping_flags |= mtype_local; /* local HBM region far from partition or remote XGMI GPU */ else if (svm_nodes_in_same_hive(bo_node, node)) @@ -1358,8 +1358,9 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, pte_flags); - /* TODO: we still need to determine the vm_manager.vram_base_offset based on - * the memory partition. 
+ /* For dGPU mode, we use same vm_manager to allocate VRAM for + * different memory partition based on fpfn/lpfn, we should use + * same vm_manager.vram_base_offset regardless memory partition. */ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, From ea7bf2f22061a7bd77b17ddd4ac1bb3500ae823f Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 23 Feb 2023 20:00:05 -0500 Subject: [PATCH 545/861] drm/amdgpu: Alloc page table on correct memory partition Alloc kernel mode page table bo uses the amdgpu_vm->mem_id + 1 as bp mem_id_plus1 parameter. For APU mode, select the correct TTM pool to alloc page from the corresponding memory partition, this will be the closest NUMA node. For dGPU mode, select the correct address range for vram manager. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 60b1da93b06d..62fc7e8d326e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -534,6 +534,8 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.no_wait_gpu = immediate; + bp.mem_id_plus1 = vm->mem_id + 1; + if (vm->root.bo) bp.resv = vm->root.bo->tbo.base.resv; @@ -558,6 +560,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.resv = bo->tbo.base.resv; bp.bo_ptr_size = sizeof(struct amdgpu_bo); + bp.mem_id_plus1 = vm->mem_id + 1; r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); From 6cfba94a77c5cbad2d7c106ac5f026b6a8b1efe6 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Tue, 7 Mar 2023 11:30:24 -0500 Subject: [PATCH 546/861] drm/amdgpu: dGPU mode set VRAM range lpfn as exclusive TTM place lpfn is exclusive used as end (start + size) in drm and buddy allocator, adev->gmc memory partition range lpfn is inclusive (start + size - 1), should plus 1 to set TTM place lpfn. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 85ad355815fe..b2d11c4f39b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -134,7 +134,11 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (adev->gmc.mem_partitions && abo->mem_id >= 0) { places[c].fpfn = adev->gmc.mem_partitions[abo->mem_id].range.fpfn; - places[c].lpfn = adev->gmc.mem_partitions[abo->mem_id].range.lpfn; + /* + * memory partition range lpfn is inclusive start + size - 1 + * TTM place lpfn is exclusive start + size + */ + places[c].lpfn = adev->gmc.mem_partitions[abo->mem_id].range.lpfn + 1; } else { places[c].fpfn = 0; places[c].lpfn = 0; From 3ebfd221c1a83e5f0edadb87d173d8fd93d1d125 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Wed, 8 Mar 2023 11:57:00 -0500 Subject: [PATCH 547/861] drm/amdkfd: Store xcp partition id to amdgpu bo For memory accounting per compute partition and export drm amdgpu bo and then import to KFD, we need the xcp id to account the memory usage or find the KFD node of the original amdgpu bo to create the KFD bo on the correct adev KFD node. 
Set xcp_id_plus1 of amdgpu_bo_param to create bo and store xcp_id to amddgpu bo. Add helper macro to get the mem_id from adev and xcp_id. v2: squash in fix ("drm/amdgpu: Fix BO creation failure on GFX 9.4.3 dGPU") Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 11 ++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 15 ++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 12 ++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 ++-- 10 files changed, 42 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 324cb566ca2f..05c54776951b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -330,6 +330,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag); +#define KFD_XCP_MEM_ID(adev, xcp_id) \ + ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ + (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) + #define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index c234dc0db799..8724a0be31b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1634,6 +1634,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( uint64_t *offset, uint32_t flags, bool criu_resume) { struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); + struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; @@ -1641,7 +1642,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; uint64_t aligned_size; - int8_t mem_id = -1; + int8_t xcp_id = -1; u64 alloc_flags; int ret; @@ -1660,7 +1661,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; } - mem_id = avm->mem_id; + xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id; } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1718,12 +1719,12 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( goto err_reserve_limit; } - pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s mem_id %d\n", + pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n", va, (*mem)->aql_queue ? size << 1 : size, - domain_string(alloc_domain), mem_id); + domain_string(alloc_domain), xcp_id); ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags, - bo_type, NULL, &gobj, mem_id + 1); + bo_type, NULL, &gobj, xcp_id + 1); if (ret) { pr_debug("Failed to create BO on domain %s. 
ret %d\n", domain_string(alloc_domain), ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 33ebee18b80d..7e8839cc6f58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -98,7 +98,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj, int8_t mem_id_plus1) + struct drm_gem_object **obj, int8_t xcp_id_plus1) { struct amdgpu_bo *bo; struct amdgpu_bo_user *ubo; @@ -116,7 +116,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bp.flags = flags; bp.domain = initial_domain; bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.mem_id_plus1 = mem_id_plus1; + bp.xcp_id_plus1 = xcp_id_plus1; r = amdgpu_bo_create_user(adev, &bp, &ubo); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 646c4fcc8e40..f30264782ba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -43,7 +43,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj, int8_t mem_id_plus1); + struct drm_gem_object **obj, int8_t xcp_id_plus1); int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b2d11c4f39b0..42c02f48c3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -131,14 +131,15 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_VRAM) { unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; + int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id); - if (adev->gmc.mem_partitions && abo->mem_id >= 0) { - places[c].fpfn = adev->gmc.mem_partitions[abo->mem_id].range.fpfn; + if (adev->gmc.mem_partitions && mem_id >= 0) { + places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn; /* * memory partition range lpfn is inclusive start + size - 1 * TTM place lpfn is exclusive start + size */ - places[c].lpfn = adev->gmc.mem_partitions[abo->mem_id].range.lpfn + 1; + places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1; } else { places[c].fpfn = 0; places[c].lpfn = 0; @@ -583,8 +584,12 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bo->flags = bp->flags; - /* bo->mem_id -1 means any partition */ - bo->mem_id = bp->mem_id_plus1 - 1; + if (adev->gmc.mem_partitions) + /* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */ + bo->xcp_id = bp->xcp_id_plus1 - 1; + else + /* For GPUs without spatial partitioning */ + bo->xcp_id = 0; if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index eb24a66ccee5..05496b97ef93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -56,8 +56,8 @@ struct amdgpu_bo_param { bool no_wait_gpu; struct dma_resv *resv; void (*destroy)(struct ttm_buffer_object *bo); - /* memory partition number plus 1, 0 means any partition */ - int8_t mem_id_plus1; + /* xcp partition number plus 1, 0 
means any partition */ + int8_t xcp_id_plus1; }; /* bo virtual addresses in a vm */ @@ -111,8 +111,12 @@ struct amdgpu_bo { #endif struct kgd_mem *kfd_bo; - /* memory partition number, -1 means any partition */ - int8_t mem_id; + /* + * For GPUs with spatial partitioning, xcp partition number, -1 means + * any partition. For other ASICs without spatial partition, always 0 + * for memory accounting. + */ + int8_t xcp_id; }; struct amdgpu_bo_user { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 129c593cb2bd..23101c82519a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1051,6 +1051,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev, static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_ttm_tt *gtt; enum ttm_caching caching; @@ -1060,7 +1061,10 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return NULL; } gtt->gobj = &bo->base; - gtt->pool_id = abo->mem_id; + if (adev->gmc.mem_partitions && abo->xcp_id >= 0) + gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id); + else + gtt->pool_id = abo->xcp_id; if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) caching = ttm_write_combined; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 62fc7e8d326e..cc3b1b596e56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -502,6 +502,7 @@ exit: int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, int level, bool immediate, struct amdgpu_bo_vm **vmbo) { + struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm); struct amdgpu_bo_param bp; struct amdgpu_bo *bo; struct dma_resv *resv; @@ -534,7 +535,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.no_wait_gpu = immediate; - bp.mem_id_plus1 = vm->mem_id + 1; + bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1; if (vm->root.bo) bp.resv = vm->root.bo->tbo.base.resv; @@ -560,7 +561,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.resv = bo->tbo.base.resv; bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.mem_id_plus1 = vm->mem_id + 1; + bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 
0 : fpriv->xcp_id + 1; r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 263d17a8b433..7ea80bdf8e1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1248,7 +1248,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || (is_vram && adev == bo_adev && - bo->mem_id == vm->mem_id); + KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id); snoop = true; if (uncached) { mtype = MTYPE_UC; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index a700d9ccd054..45959892bc0f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -556,7 +556,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bp.type = ttm_bo_type_device; bp.resv = NULL; if (node->xcp) - bp.mem_id_plus1 = node->xcp->mem_id + 1; + bp.xcp_id_plus1 = node->xcp->id + 1; r = amdgpu_bo_create_user(node->adev, &bp, &ubo); if (r) { @@ -567,7 +567,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n", bo->tbo.resource->start << PAGE_SHIFT, bp.size, - bp.mem_id_plus1 - 1); + bp.xcp_id_plus1 - 1); r = amdgpu_bo_reserve(bo, true); if (r) { From 2fa9ff25de08e598af051c76b216d2f073b2ee89 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 9 Mar 2023 19:30:02 -0500 Subject: [PATCH 548/861] drm/amdgpu: KFD graphics interop support compute partition kfd_ioctl_get_dmabuf_info uses the amdgpu bo xcp_id to get the gpu_id of the KFD node from the exported dmabuf_adev, and then creates the kfd bo on the correct adev and KFD node when importing the amdgpu bo to KFD. Remove the function kfd_device_by_adev; it is not needed, as its result is the same as dmabuf_adev->kfd.dev->nodes[0]->id.
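A minimal sketch of the node lookup this enables (illustrative only; the helper name below is hypothetical, the actual change is in kfd_ioctl_get_dmabuf_info in the diff):

  /* Hypothetical helper, not part of this patch: resolve the gpu_id of
   * the KFD node that owns an exported BO. A negative xcp_id means the
   * BO is not bound to a specific partition, so node 0 is used.
   */
  static uint32_t kfd_gpu_id_for_bo(struct amdgpu_device *dmabuf_adev,
                                    int8_t xcp_id)
  {
          return dmabuf_adev->kfd.dev->nodes[xcp_id >= 0 ? xcp_id : 0]->id;
  }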
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 4 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 14 ++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 - drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 18 ------------------ 5 files changed, 10 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bbbfe9ec4adf..00edb13d2124 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -498,7 +498,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, struct amdgpu_device **dmabuf_adev, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, - uint32_t *flags) + uint32_t *flags, int8_t *xcp_id) { struct dma_buf *dma_buf; struct drm_gem_object *obj; @@ -542,6 +542,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC; } + if (xcp_id) + *xcp_id = bo->xcp_id; out_put: dma_buf_put(dma_buf); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 05c54776951b..4e6221bccffe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -241,7 +241,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, struct amdgpu_device **dmabuf_adev, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, - uint32_t *flags); + uint32_t *flags, int8_t *xcp_id); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, struct amdgpu_device *src); int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index f85ac4dbc673..fcad90d53c9b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1499,6 +1499,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, struct amdgpu_device *dmabuf_adev; void *metadata_buffer = NULL; uint32_t flags; + int8_t xcp_id; unsigned int i; int r; @@ -1519,17 +1520,14 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd, &dmabuf_adev, &args->size, metadata_buffer, args->metadata_size, - &args->metadata_size, &flags); + &args->metadata_size, &flags, &xcp_id); if (r) goto exit; - /* Reverse-lookup gpu_id from kgd pointer */ - dev = kfd_device_by_adev(dmabuf_adev); - if (!dev) { - r = -EINVAL; - goto exit; - } - args->gpu_id = dev->id; + if (xcp_id >= 0) + args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id; + else + args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id; args->flags = flags; /* Copy metadata buffer to user mode */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 214d950f948e..44f4d5509db6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1068,7 +1068,6 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock( struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); -struct kfd_node 
*kfd_device_by_adev(const struct amdgpu_device *adev); static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id, uint32_t vmid) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index a8e25aecf839..dbb6159344b3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -125,24 +125,6 @@ struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev) return device; } -struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev) -{ - struct kfd_topology_device *top_dev; - struct kfd_node *device = NULL; - - down_read(&topology_lock); - - list_for_each_entry(top_dev, &topology_device_list, list) - if (top_dev->gpu && top_dev->gpu->adev == adev) { - device = top_dev->gpu; - break; - } - - up_read(&topology_lock); - - return device; -} - /* Called with write topology_lock acquired */ static void kfd_release_topology_device(struct kfd_topology_device *dev) { From b125b80bd546d72d08ab64d63cfc8efa397b6779 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 13 Mar 2023 12:03:18 -0400 Subject: [PATCH 549/861] drm/amdgpu: use xcp partition ID for amdgpu_gem Find xcp_id from amdgpu_fpriv, use it for amdgpu_gem_object_create. Signed-off-by: James Zhu Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7e8839cc6f58..fec9df354652 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -337,7 +337,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, retry: r = amdgpu_gem_object_create(adev, size, args->in.alignment, initial_domain, - flags, ttm_bo_type_device, resv, &gobj, 0); + flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1); if (r && r != -ERESTARTSYS) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; @@ -380,6 +380,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, struct ttm_operation_ctx ctx = { true, false }; struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_gem_userptr *args = data; + struct amdgpu_fpriv *fpriv = filp->driver_priv; struct drm_gem_object *gobj; struct hmm_range *range; struct amdgpu_bo *bo; @@ -406,7 +407,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU, - 0, ttm_bo_type_device, NULL, &gobj, 0); + 0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1); if (r) return r; @@ -909,6 +910,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct drm_gem_object *gobj; uint32_t handle; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | @@ -932,7 +934,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, domain = amdgpu_bo_get_preferred_domain(adev, amdgpu_display_supported_domains(adev, flags)); r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags, - ttm_bo_type_device, NULL, &gobj, 0); + ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1); if (r) return -ENOMEM; From 315e29eca57f85107cc6f687c2d510aa532fb3f0 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 20 Mar 2023 11:21:38 -0400 
Subject: [PATCH 550/861] drm/amdkfd: Move local_mem_info to kfd_node We need to track memory usage on a per partition basis. To do that, store the local memory information in KFD node instead of kfd device. v2: squash in fix ("amdkfd: Use mem_id to access mem_partition info") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 17 +++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12 +++++++----- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++++--- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 +++++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 7 ++++--- 7 files changed, 36 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 00edb13d2124..85df73f2c85e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -428,14 +428,23 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, } void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, - struct kfd_local_mem_info *mem_info) + struct kfd_local_mem_info *mem_info, + uint8_t xcp_id) { memset(mem_info, 0, sizeof(*mem_info)); - mem_info->local_mem_size_public = adev->gmc.visible_vram_size; - mem_info->local_mem_size_private = adev->gmc.real_vram_size - + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size) + mem_info->local_mem_size_public = + KFD_XCP_MEMORY_SIZE(adev, xcp_id); + else + mem_info->local_mem_size_private = + KFD_XCP_MEMORY_SIZE(adev, xcp_id); + } else { + mem_info->local_mem_size_public = adev->gmc.visible_vram_size; + mem_info->local_mem_size_private = adev->gmc.real_vram_size - adev->gmc.visible_vram_size; - + } mem_info->vram_width = adev->gmc.vram_width; pr_debug("Address base: %pap public 0x%llx private 0x%llx\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 4e6221bccffe..4bf6f5659568 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -231,7 +231,8 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem); uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum kgd_engine_type type); void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, - struct kfd_local_mem_info *mem_info); + struct kfd_local_mem_info *mem_info, + uint8_t xcp_id); uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev); uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev); @@ -334,10 +335,11 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) -#define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ - (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ - (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ - (n)->adev->gmc.real_vram_size) +#define KFD_XCP_MEMORY_SIZE(adev, xcp_id)\ + ((adev)->gmc.num_mem_partitions && (xcp_id) >= 0 ?\ + (adev)->gmc.mem_partitions[KFD_XCP_MEM_ID((adev), (xcp_id))].size /\ + (adev)->xcp_mgr->num_xcp_per_mem_partition :\ + (adev)->gmc.real_vram_size) #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index fcad90d53c9b..1ae867482bc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1023,11 +1023,12 @@ bool kfd_dev_is_large_bar(struct kfd_node *dev) if (dev->kfd->use_iommu_v2) return false; - if (dev->kfd->local_mem_info.local_mem_size_private == 0 && - dev->kfd->local_mem_info.local_mem_size_public > 0) + if (dev->local_mem_info.local_mem_size_private == 0 && + dev->local_mem_info.local_mem_size_public > 0) return true; - if (dev->kfd->local_mem_info.local_mem_size_public == 0 && dev->kfd->adev->gmc.is_app_apu) { + if (dev->local_mem_info.local_mem_size_public == 0 && + dev->kfd->adev->gmc.is_app_apu) { pr_debug("APP APU, Consider like a large bar system\n"); return true; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 1aaf933f9f48..950af6820153 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -2191,7 +2191,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. */ - local_mem_info = kdev->kfd->local_mem_info; + local_mem_info = kdev->local_mem_info; sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index db5b53fcdf11..d41da964d2f5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -726,7 +726,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_migrate_init(kfd->adev); - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); @@ -756,7 +755,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (node->xcp) { dev_info(kfd_device, "KFD node %d partition %d size %lldM\n", node->node_id, node->xcp->mem_id, - KFD_XCP_MEMORY_SIZE(node) >> 20); + KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20); } if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && @@ -783,6 +782,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, } node->max_proc_per_quantum = max_proc_per_quantum; atomic_set(&node->sram_ecc_flag, 0); + + amdgpu_amdkfd_get_local_mem_info(kfd->adev, + &node->local_mem_info, node->xcp->id); + /* Initialize the KFD node */ if (kfd_init_node(node)) { dev_err(kfd_device, "Error initializing KFD node\n"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 44f4d5509db6..3bd222e8f6c3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -313,6 +313,8 @@ struct kfd_node { unsigned int compute_vmid_bitmap; + struct kfd_local_mem_info local_mem_info; + struct kfd_dev *kfd; }; @@ -335,7 +337,6 @@ struct kfd_dev { */ struct kgd2kfd_shared_resources shared_resources; - struct kfd_local_mem_info local_mem_info; const struct kfd2kgd_calls *kfd2kgd; struct mutex doorbell_mutex; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index dbb6159344b3..e0bacf017a40 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1152,8 +1152,8 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) if (!gpu) return 0; - local_mem_size = gpu->kfd->local_mem_info.local_mem_size_private + - gpu->kfd->local_mem_info.local_mem_size_public; + local_mem_size = 
gpu->local_mem_info.local_mem_size_private + + gpu->local_mem_info.local_mem_size_public; buf[0] = gpu->adev->pdev->devfn; buf[1] = gpu->adev->pdev->subsystem_vendor | (gpu->adev->pdev->subsystem_device << 16); @@ -1234,7 +1234,8 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * for APUs - If CRAT from ACPI reports more than one bank, then * all the banks will report the same mem_clk_max information */ - amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info, + dev->gpu->xcp->id); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; From 1c77527a69d5ca19cb276e2728992d922b687f35 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 20 Mar 2023 11:22:30 -0400 Subject: [PATCH 551/861] drm/amdkfd: Fix memory reporting on GFX 9.4.3 This patch fixes memory reporting on the GFX 9.4.3 APU and dGPU by reporting available memory on a per-partition basis. If it's an APU, available and used memory calculations take into account system and TTM memory. v2: squash in fix ("drm/amdkfd: Fix array out of bound warning") squash in fix ("drm/amdgpu: Update memory reporting for GFX9.4.3") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12 +-- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 81 ++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 14 ++-- 5 files changed, 84 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 4bf6f5659568..948d362adabb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -35,6 +35,7 @@ #include #include "amdgpu_sync.h" #include "amdgpu_vm.h" +#include "amdgpu_xcp.h" extern uint64_t amdgpu_amdkfd_total_mem_size; @@ -98,8 +99,8 @@ struct amdgpu_amdkfd_fence { struct amdgpu_kfd_dev { struct kfd_dev *dev; - int64_t vram_used; - uint64_t vram_used_aligned; + int64_t vram_used[MAX_XCP]; + uint64_t vram_used_aligned[MAX_XCP]; bool init_complete; struct work_struct reset_work; @@ -287,7 +288,8 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, void *drm_priv); uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv); -size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev); +size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, + uint8_t xcp_id); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv, struct kgd_mem **mem, @@ -327,9 +329,9 @@ void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev); int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag); + uint64_t size, u32 alloc_flag, int8_t xcp_id); void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag); + uint64_t size, u32 alloc_flag, int8_t xcp_id); #define KFD_XCP_MEM_ID(adev, xcp_id) \ ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 8724a0be31b8..cc37f04651e2 100644 ---
a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -157,12 +157,13 @@ void amdgpu_amdkfd_reserve_system_mem(uint64_t size) * Return: returns -ENOMEM in case of error, ZERO otherwise */ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag) + uint64_t size, u32 alloc_flag, int8_t xcp_id) { uint64_t reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); size_t system_mem_needed, ttm_mem_needed, vram_needed; int ret = 0; + uint64_t vram_size = 0; system_mem_needed = 0; ttm_mem_needed = 0; @@ -177,6 +178,17 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, * 2M BO chunk. */ vram_needed = size; + /* + * For GFX 9.4.3, get the VRAM size from XCP structs + */ + if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id)) + return -EINVAL; + + vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id); + if (adev->gmc.is_app_apu) { + system_mem_needed = size; + ttm_mem_needed = size; + } } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { system_mem_needed = size; } else if (!(alloc_flag & @@ -196,8 +208,8 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || - (adev && adev->kfd.vram_used + vram_needed > - adev->gmc.real_vram_size - reserved_for_pt)) { + (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed > + vram_size - reserved_for_pt)) { ret = -ENOMEM; goto release; } @@ -207,9 +219,11 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, */ WARN_ONCE(vram_needed && !adev, "adev reference can't be null when vram is used"); - if (adev) { - adev->kfd.vram_used += vram_needed; - adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); + if (adev && xcp_id >= 0) { + adev->kfd.vram_used[xcp_id] += vram_needed; + adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ? 
+ vram_needed : + ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); } kfd_mem_limit.system_mem_used += system_mem_needed; kfd_mem_limit.ttm_mem_used += ttm_mem_needed; @@ -220,7 +234,7 @@ release: } void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag) + uint64_t size, u32 alloc_flag, int8_t xcp_id) { spin_lock(&kfd_mem_limit.mem_limit_lock); @@ -230,9 +244,19 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { WARN_ONCE(!adev, "adev reference can't be null when alloc mem flags vram is set"); + if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id)) + goto release; + if (adev) { - adev->kfd.vram_used -= size; - adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN); + adev->kfd.vram_used[xcp_id] -= size; + if (adev->gmc.is_app_apu) { + adev->kfd.vram_used_aligned[xcp_id] -= size; + kfd_mem_limit.system_mem_used -= size; + kfd_mem_limit.ttm_mem_used -= size; + } else { + adev->kfd.vram_used_aligned[xcp_id] -= + ALIGN(size, VRAM_AVAILABLITY_ALIGN); + } } } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { kfd_mem_limit.system_mem_used -= size; @@ -242,8 +266,8 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); goto release; } - WARN_ONCE(adev && adev->kfd.vram_used < 0, - "KFD VRAM memory accounting unbalanced"); + WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0, + "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id); WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, "KFD TTM memory accounting unbalanced"); WARN_ONCE(kfd_mem_limit.system_mem_used < 0, @@ -259,7 +283,8 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) u32 alloc_flags = bo->kfd_bo->alloc_flags; u64 size = amdgpu_bo_size(bo); - amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags); + amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags, + bo->xcp_id); kfree(bo->kfd_bo); } @@ -1609,23 +1634,42 @@ out_unlock: return ret; } -size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev) +size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, + uint8_t xcp_id) { uint64_t reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); ssize_t available; + uint64_t vram_available, system_mem_available, ttm_mem_available; spin_lock(&kfd_mem_limit.mem_limit_lock); - available = adev->gmc.real_vram_size - - adev->kfd.vram_used_aligned + vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id) + - adev->kfd.vram_used_aligned[xcp_id] - atomic64_read(&adev->vram_pin_size) - reserved_for_pt; + + if (adev->gmc.is_app_apu) { + system_mem_available = no_system_mem_limit ? 
+ kfd_mem_limit.max_system_mem_limit : + kfd_mem_limit.max_system_mem_limit - + kfd_mem_limit.system_mem_used; + + ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit - + kfd_mem_limit.ttm_mem_used; + + available = min3(system_mem_available, ttm_mem_available, + vram_available); + available = ALIGN_DOWN(available, PAGE_SIZE); + } else { + available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN); + } + spin_unlock(&kfd_mem_limit.mem_limit_lock); if (available < 0) available = 0; - return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN); + return available; } int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( @@ -1713,7 +1757,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( amdgpu_sync_create(&(*mem)->sync); - ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags); + ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags, + xcp_id); if (ret) { pr_debug("Insufficient memory\n"); goto err_reserve_limit; @@ -1781,7 +1826,7 @@ err_node_allow: /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: - amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags); + amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id); err_reserve_limit: mutex_destroy(&(*mem)->lock); if (gobj) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 68b63b970ce8..9c5912b9d8bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -24,8 +24,11 @@ #ifndef AMDGPU_XCP_H #define AMDGPU_XCP_H +#include <linux/pci.h> #include <linux/xarray.h> +#include "amdgpu_ctx.h" + #define MAX_XCP 8 #define AMDGPU_XCP_MODE_NONE -1 @@ -34,6 +37,8 @@ #define AMDGPU_XCP_FL_NONE 0 #define AMDGPU_XCP_FL_LOCKED (1 << 0) +struct amdgpu_fpriv; enum AMDGPU_XCP_IP_BLOCK { AMDGPU_XCP_GFXHUB, AMDGPU_XCP_GFX, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1ae867482bc7..a9efff94390b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1044,7 +1044,8 @@ static int kfd_ioctl_get_available_memory(struct file *filep, if (!pdd) return -EINVAL; - args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev); + args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev, + pdd->dev->node_id); kfd_unlock_pdd(pdd); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 45959892bc0f..c1ab70faf36e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -280,7 +280,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage) if (update_mem_usage && !p->xnack_enabled) { pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size); amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } mutex_destroy(&prange->lock); mutex_destroy(&prange->migrate_mutex); @@ -313,7 +313,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, p = container_of(svms, struct kfd_process, svms); if (!p->xnack_enabled && update_mem_usage && amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) { + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) { pr_info("SVM mapping failed, exceeds resident system memory limit\n"); kfree(prange); return NULL; @@ -3037,10 +3037,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled) size = (pchild->last - pchild->start + 1) << PAGE_SHIFT; if (xnack_enabled) {
amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } else { r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); if (r) goto out_unlock; reserved_size += size; @@ -3050,10 +3050,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled) size = (prange->last - prange->start + 1) << PAGE_SHIFT; if (xnack_enabled) { amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } else { r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); if (r) goto out_unlock; reserved_size += size; @@ -3066,7 +3066,7 @@ out_unlock: if (r) amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); else /* Change xnack mode must be inside svms lock, to avoid race with * svm_range_deferred_list_work unreserve memory in parallel. From 25f50704343de1bea70100ad41621b5737a6a96b Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 23 Mar 2023 08:45:56 -0400 Subject: [PATCH 552/861] drm/amdkfd: APU mode set max svm range pages svm_migrate_init sets the max svm range pages based on the KFD nodes' partition size. APU mode doesn't init pgmap because there is no migration. kgd2kfd_device_init calls svm_migrate_init after KFD node allocation and initialization. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 5 ++--- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 7 +++++-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 15 ++++++++++----- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index d41da964d2f5..882ff86bba08 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -724,9 +724,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); - svm_migrate_init(kfd->adev); - - dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); @@ -794,6 +791,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->nodes[i] = node; } + svm_migrate_init(kfd->adev); + if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 199d32c7c289..2512bf681112 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -1000,6 +1000,11 @@ int svm_migrate_init(struct amdgpu_device *adev) if (!KFD_IS_SOC15(kfddev->dev)) return -EINVAL; + svm_range_set_max_pages(adev); + + if (adev->gmc.is_app_apu) + return 0; + pgmap = &kfddev->pgmap; memset(pgmap, 0, sizeof(*pgmap)); @@ -1042,8 +1047,6 @@ int svm_migrate_init(struct amdgpu_device *adev) amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size)); - svm_range_set_max_pages(adev); - pr_info("HMM registered %ldMB device memory\n", size >> 20); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c1ab70faf36e..206851c9e642 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1938,14 +1938,19 @@ void svm_range_set_max_pages(struct amdgpu_device *adev) { uint64_t max_pages; uint64_t pages, _pages; + uint64_t min_pages = 0; + int i; + + for (i = 0; i <
adev->kfd.dev->num_nodes; i++) { + pages = KFD_XCP_MEMORY_SIZE(adev, adev->kfd.dev->nodes[i]->xcp->id) >> 17; + pages = clamp(pages, 1ULL << 9, 1ULL << 18); + pages = rounddown_pow_of_two(pages); + min_pages = min_not_zero(min_pages, pages); + } - /* 1/32 VRAM size in pages */ - pages = adev->gmc.real_vram_size >> 17; - pages = clamp(pages, 1ULL << 9, 1ULL << 18); - pages = rounddown_pow_of_two(pages); do { max_pages = READ_ONCE(max_svm_range_pages); - _pages = min_not_zero(max_pages, pages); + _pages = min_not_zero(max_pages, min_pages); } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages); } From 44a976655597b60bd501148abe66462bdc33fb6b Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Fri, 31 Mar 2023 17:16:41 +0800 Subject: [PATCH 553/861] drm/amdgpu: route ioctls on primary node of XCPs to primary device During XCP init, unlike for the primary device, there is no amdgpu_device attached to each XCP's drm_device. When a user tries to open/close the primary node of an XCP drm_device, this rerouting solves the NULL pointer dereference caused by referring to any member of the missing amdgpu_device: BUG: unable to handle page fault for address: 0000000000020c80 #PF: supervisor write access in kernel mode #PF: error_code(0x0002) - not-present page Oops: 0002 [#1] PREEMPT SMP NOPTI Call Trace: lock_timer_base+0x6b/0x90 try_to_del_timer_sync+0x2b/0x80 del_timer_sync+0x29/0x40 flush_delayed_work+0x1c/0x50 amdgpu_driver_open_kms+0x2c/0x280 [amdgpu] drm_file_alloc+0x1b3/0x260 [drm] drm_open+0xaa/0x280 [drm] drm_stub_open+0xa2/0x120 [drm] chrdev_open+0xa6/0x1c0 Signed-off-by: Shiwu Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 610c32c4f5af..daeb6bcc9245 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -241,6 +241,7 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev) /* Redirect all IOCTLs to the primary device */ p_ddev->render->dev = ddev; + p_ddev->primary->dev = ddev; p_ddev->vma_offset_manager = ddev->vma_offset_manager; adev->xcp_mgr->xcp[i].ddev = p_ddev; } From 84b4dd3f84de424a68e1fda0d483530ddaa92b45 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 31 Mar 2023 11:18:12 -0400 Subject: [PATCH 554/861] drm/amdkfd: Refactor migrate init to support partition switch Rename svm_migrate_init to a better name, kgd2kfd_init_zone_device, because it sets up the zone device pgmap for page migration, and keep it in kfd_migrate.c so it can access the static svm_migrate_pgmap_ops. Call it only once in amdgpu_device_ip_init, after the adev IP blocks are initialized but before amdgpu_amdkfd_device_init initializes the KFD nodes, which enable SVM support based on pgmap. svm_range_set_max_pages is called by kgd2kfd_device_init every time after switching compute partition mode.
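The resulting call order, roughly (an illustrative sketch of the flow described above, not verbatim kernel code):

  /* Illustrative call order after this refactor:
   *
   * amdgpu_device_ip_init(adev)
   *         kgd2kfd_init_zone_device(adev);   // once: set up the pgmap
   *         amdgpu_amdkfd_device_init(adev);  // init KFD nodes, SVM support
   *
   * kgd2kfd_device_init(kfd)
   *         svm_range_set_max_pages(kfd->adev); // re-run on partition switch
   */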
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +++----- drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 9 --------- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4 ++++ 6 files changed, 23 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 948d362adabb..48d12dbff968 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -372,6 +372,17 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { } #endif + +#if IS_ENABLED(CONFIG_HSA_AMD_SVM) +int kgd2kfd_init_zone_device(struct amdgpu_device *adev); +#else +static inline +int kgd2kfd_init_zone_device(struct amdgpu_device *adev) +{ + return 0; +} +#endif + /* KGD2KFD callbacks */ int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger); int kgd2kfd_resume_mm(struct mm_struct *mm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 02ee79b7b56d..f0666230b2ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2633,8 +2633,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) goto init_failed; /* Don't init kfd if whole hive need to be reset during init */ - if (!adev->gmc.xgmi.pending_reset) + if (!adev->gmc.xgmi.pending_reset) { + kgd2kfd_init_zone_device(adev); amdgpu_amdkfd_device_init(adev); + } amdgpu_fru_get_product_info(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 882ff86bba08..bf32e547182c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -32,6 +32,7 @@ #include "kfd_iommu.h" #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" +#include "kfd_svm.h" #include "kfd_migrate.h" #include "amdgpu.h" #include "amdgpu_xcp.h" @@ -791,7 +792,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->nodes[i] = node; } - svm_migrate_init(kfd->adev); + svm_range_set_max_pages(kfd->adev); if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 2512bf681112..35cf6558cf1b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -988,7 +988,7 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = { /* Each VRAM page uses sizeof(struct page) on system memory */ #define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page)) -int svm_migrate_init(struct amdgpu_device *adev) +int kgd2kfd_init_zone_device(struct amdgpu_device *adev) { struct amdgpu_kfd_dev *kfddev = &adev->kfd; struct dev_pagemap *pgmap; @@ -996,12 +996,10 @@ int svm_migrate_init(struct amdgpu_device *adev) unsigned long size; void *r; - /* Page migration works on Vega10 or newer */ - if (!KFD_IS_SOC15(kfddev->dev)) + /* Page migration works on gfx9 or newer */ + if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1)) return -EINVAL; - svm_range_set_max_pages(adev); - if (adev->gmc.is_app_apu) return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index a5d7e6d22264..487f26368164 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -47,15 +47,6 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); -int svm_migrate_init(struct amdgpu_device *adev); - -#else - -static inline int svm_migrate_init(struct amdgpu_device *adev) -{ - return 0; -} - #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ #endif /* KFD_MIGRATE_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 021def496f5a..762679835e31 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -265,6 +265,10 @@ static inline int kfd_criu_resume_svm(struct kfd_process *p) return 0; } +static inline void svm_range_set_max_pages(struct amdgpu_device *adev) +{ +} + #define KFD_IS_SVM_API_SUPPORTED(dev) false #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ From 3cde91172d2e9d8d8dc6e0d62b7c829de503825c Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Wed, 19 Apr 2023 17:39:35 -0400 Subject: [PATCH 555/861] drm/amdgpu: Correct get_xcp_mem_id calculation Current calculation only works for NPS4/QPX mode, correct it for NPS4/CPX mode. Signed-off-by: Philip Yang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 4ca932a62ce6..93e9f947a85d 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -518,10 +518,9 @@ out: static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev, int xcc_id, uint8_t *mem_id) { - /* TODO: Check if any validation is required based on current - * memory/spatial modes - */ + /* memory/spatial modes validation check is already done */ *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp; + *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition; return 0; } From 40b832aac03249ebc70479da9f3ecf2789deaeed Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 12 Apr 2023 16:56:29 -0400 Subject: [PATCH 556/861] drm/amdgpu: Enable IH CAM on GFX9.4.3 This patch enables IH CAM on GFX9.4.3 ASIC. 
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index e1552d645308..755259e96bbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -265,7 +265,7 @@ static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev, ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, DOORBELL0_CTRL_ENTRY_0, BIF_DOORBELL0_RANGE_SIZE_ENTRY, - 0x4); + 0x8); ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, @@ -278,7 +278,7 @@ static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev, S2A_DOORBELL_PORT1_RANGE_OFFSET, 0); ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, - S2A_DOORBELL_PORT1_RANGE_SIZE, 0x4); + S2A_DOORBELL_PORT1_RANGE_SIZE, 0x8); ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index 17ccf02462ab..4d719df376a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -334,7 +334,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) vega20_setup_retry_doorbell(adev->irq.retry_cam_doorbell_index)); /* Enable IH Retry CAM */ - if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) + if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0) || + adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2)) WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN, ENABLE, 1); else From af2ba368838ee4913e758f34e3d8bbfeb110be36 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 27 Feb 2023 16:31:56 +0800 Subject: [PATCH 557/861] drm/amdgpu: convert logical instance mask to physical one Convert instance mask for the convenience of RAS TA. 
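As a worked example (instance numbers invented for illustration): with a hypothetical mapping of logical GC instances 0 and 2 to physical instances 1 and 6, the ffs()-based loop below turns a logical mask of 0x5 into a physical mask of 0x42:

  /* Illustrative usage only; assumes the hypothetical mapping above. */
  uint32_t dev_mask = GET_MASK(GC, 0x5); /* logical 0b0101 -> physical 0x42 */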
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++-- .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 18 ++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15_common.h | 7 ++++++- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bba8cfeff71f..a4d0bc80ac92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -677,12 +677,14 @@ enum amd_hw_ip_block_type { #define IP_VERSION_REV(ver) ((ver) & 0xFF) struct amdgpu_ip_map_info { - /* Map of logical to actual dev instances */ + /* Map of logical to actual dev instances/mask */ uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE]; int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst); - + uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + uint32_t mask); }; struct amd_powerplay { diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 93e9f947a85d..68d1a0fc5f5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -229,6 +229,23 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, return dev_inst; } +static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + uint32_t mask) +{ + uint32_t dev_mask = 0; + int8_t log_inst, dev_inst; + + while (mask) { + log_inst = ffs(mask) - 1; + dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst); + dev_mask |= (1 << dev_inst); + mask &= ~(1 << log_inst); + } + + return dev_mask; +} + static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, enum amd_hw_ip_block_type ip_block, uint32_t inst_mask) @@ -257,6 +274,7 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; + adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask; } /* Fixed pattern for smn addressing on different AIDs: diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 3730c5ec202f..96948a59f8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -25,7 +25,12 @@ #define __SOC15_COMMON_H__ /* GET_INST returns the physical instance corresponding to a logical instance */ -#define GET_INST(ip, inst) (adev->ip_map.logical_to_dev_inst? adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst): inst) +#define GET_INST(ip, inst) \ + (adev->ip_map.logical_to_dev_inst ? \ + adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst) +#define GET_MASK(ip, mask) \ + (adev->ip_map.logical_to_dev_mask ? \ + adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask) /* Register Access Macros */ #define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) From 2c22ed0bdb0cb6da9408593eafa6137325576017 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 27 Feb 2023 18:25:23 +0800 Subject: [PATCH 558/861] drm/amdgpu: add instance mask for RAS inject User can specify injected instances by the mask. 
For backward compatibility, the mask value is incorporated into sub block index without interface change of RAS TA. User uses logical mask and driver should convert it to physical value before sending it to RAS TA. v2: update parameter name. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 21 ++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 23 ++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 9 ++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 5 +++-- 8 files changed, 56 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index ec79a5c2f500..59b8b26e2caf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1672,14 +1672,33 @@ int psp_ras_initialize(struct psp_context *psp) } int psp_ras_trigger_error(struct psp_context *psp, - struct ta_ras_trigger_error_input *info) + struct ta_ras_trigger_error_input *info, uint32_t instance_mask) { struct ta_ras_shared_memory *ras_cmd; + struct amdgpu_device *adev = psp->adev; int ret; + uint32_t dev_mask; if (!psp->ras_context.context.initialized) return -EINVAL; + switch (info->block_id) { + case TA_RAS_BLOCK__GFX: + dev_mask = GET_MASK(GC, instance_mask); + break; + case TA_RAS_BLOCK__SDMA: + dev_mask = GET_MASK(SDMA0, instance_mask); + break; + default: + dev_mask = instance_mask; + break; + } + + /* reuse sub_block_index for backward compatibility */ + dev_mask <<= AMDGPU_RAS_INST_SHIFT; + dev_mask &= AMDGPU_RAS_INST_MASK; + info->sub_block_index |= dev_mask; + ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 0a409da749d1..d84323923a3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -486,7 +486,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_enable_features(struct psp_context *psp, union ta_ras_cmd_input *info, bool enable); int psp_ras_trigger_error(struct psp_context *psp, - struct ta_ras_trigger_error_input *info); + struct ta_ras_trigger_error_input *info, uint32_t instance_mask); int psp_ras_terminate(struct psp_context *psp); int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 64f80e8cbd63..7ae08f168f99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -256,6 +256,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, int block_id; uint32_t sub_block; u64 address, value; + /* default value is 0 if the mask is not set by user */ + u32 instance_mask = 0; if (*pos) return -EINVAL; @@ -306,7 +308,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, data->op = op; if (op == 2) { - if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", + if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x", + &sub_block, &address, &value, &instance_mask) != 4 && + sscanf(str, "%*s %*s %*s %u %llu %llu %u", + &sub_block, &address, &value, &instance_mask) 
!= 4 && + sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", &sub_block, &address, &value) != 3 && sscanf(str, "%*s %*s %*s %u %llu %llu", &sub_block, &address, &value) != 3) @@ -314,6 +320,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, data->head.sub_block_index = sub_block; data->inject.address = address; data->inject.value = value; + data->inject.instance_mask = instance_mask; } } else { if (size < sizeof(*data)) @@ -341,7 +348,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * sub_block_index: some IPs have subcomponets. say, GFX, sDMA. * name: the name of IP. * - * inject has two more members than head, they are address, value. + * inject has three more members than head, they are address, value and mask. * As their names indicate, inject operation will write the * value to the address. * @@ -365,7 +372,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl - * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl + * echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl * * Where N, is the card which you want to affect. * @@ -382,13 +389,14 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * * The sub-block is a the sub-block index, pass 0 if there is no sub-block. * The address and value are hexadecimal numbers, leading 0x is optional. + * The mask means instance mask, is optional, default value is 0x1. * * For instance, * * .. code-block:: bash * * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl - * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl + * echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl * * How to check the result of the operation? @@ -1117,13 +1125,14 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (info->head.block == AMDGPU_RAS_BLOCK__GFX) { if (block_obj->hw_ops->ras_error_inject) - ret = block_obj->hw_ops->ras_error_inject(adev, info); + ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask); } else { /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */ if (block_obj->hw_ops->ras_error_inject) - ret = block_obj->hw_ops->ras_error_inject(adev, &block_info); + ret = block_obj->hw_ops->ras_error_inject(adev, &block_info, + info->instance_mask); else /*If not defined .ras_error_inject, use default ras_error_inject*/ - ret = psp_ras_trigger_error(&adev->psp, &block_info); + ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask); } if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index e96333d0c269..bc43f7db17cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -32,6 +32,11 @@ struct amdgpu_iv_entry; #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0) +/* position of instance value in sub_block_index of + * ta_ras_trigger_error_input, the sub block uses lower 12 bits + */ +#define AMDGPU_RAS_INST_MASK 0xfffff000 +#define AMDGPU_RAS_INST_SHIFT 0xc enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, @@ -508,6 +513,7 @@ struct ras_inject_if { struct ras_common_if head; uint64_t address; uint64_t value; + uint32_t instance_mask; }; struct ras_cure_if { @@ -545,7 +551,8 @@ struct amdgpu_ras_block_object { }; struct amdgpu_ras_block_hw_ops { - int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); + int (*ras_error_inject)(struct amdgpu_device *adev, + void *inject_if, uint32_t instance_mask); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); void (*query_ras_error_status)(struct amdgpu_device *adev); void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 439925477fb8..85ee1af963dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1014,7 +1014,8 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, } /* Trigger XGMI/WAFL error */ -static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if) +static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, + void *inject_if, uint32_t instance_mask) { int ret = 0; struct ta_ras_trigger_error_input *block_info = @@ -1026,7 +1027,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *injec if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
dev_warn(adev->dev, "Failed to disallow XGMI power down"); - ret = psp_ras_trigger_error(&adev->psp, block_info); + ret = psp_ras_trigger_error(&adev->psp, block_info, instance_mask); if (amdgpu_ras_intr_triggered()) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index cc005e3bcd40..de8e70b3db75 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -770,7 +770,7 @@ static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, - void *inject_if); + void *inject_if, uint32_t instance_mask); static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev); static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, @@ -6335,7 +6335,7 @@ static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = { }; static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, - void *inject_if) + void *inject_if, uint32_t instance_mask) { struct ras_inject_if *info = (struct ras_inject_if *)inject_if; int ret; @@ -6374,7 +6374,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, block_info.value = info->value; mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info); + ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); mutex_unlock(&adev->grbm_idx_mutex); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index c67e387a97f5..59abe162bbaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -971,7 +971,7 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) } static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, - void *inject_if) + void *inject_if, uint32_t instance_mask) { struct ras_inject_if *info = (struct ras_inject_if *)inject_if; int ret; @@ -987,7 +987,7 @@ static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, block_info.value = info->value; mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info); + ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); mutex_unlock(&adev->grbm_idx_mutex); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index ec7c049c5952..4906affa6f8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1699,7 +1699,8 @@ static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev) gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL); } -static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if) +static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, + void *inject_if, uint32_t instance_mask) { struct ras_inject_if *info = (struct ras_inject_if *)inject_if; int ret; @@ -1715,7 +1716,7 @@ static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_ block_info.value = info->value; mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info); + ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); mutex_unlock(&adev->grbm_idx_mutex); return ret; From 27c5f29526d17a33946a6b977a0274eae320c489 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 13 Mar 2023 16:24:11 +0800 Subject: [PATCH 559/861] drm/amdgpu: reorganize RAS injection flow So 
GFX RAS injection can use the default function if it doesn't define its own injection interface. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 7ae08f168f99..b7d8250a9281 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1123,16 +1123,15 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, block_info.address); } - if (info->head.block == AMDGPU_RAS_BLOCK__GFX) { - if (block_obj->hw_ops->ras_error_inject) + if (block_obj->hw_ops->ras_error_inject) { + if (info->head.block == AMDGPU_RAS_BLOCK__GFX) ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask); - } else { - /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */ - if (block_obj->hw_ops->ras_error_inject) + else /* Special ras_error_inject is defined (e.g: xgmi) */ ret = block_obj->hw_ops->ras_error_inject(adev, &block_info, info->instance_mask); - else /*If not defined .ras_error_inject, use default ras_error_inject*/ - ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask); + } else { + /* default path */ + ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask); } if (ret) From 6e3c51a5814aff70b72e8b2a9953bdac7aea2f44 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 13 Mar 2023 16:34:19 +0800 Subject: [PATCH 560/861] drm/amdgpu: remove RAS GFX injection for gfx_v9_4/gfx_v9_4_2 There is no special requirement for RAS injection on these two versions, so switch to the default injection interface.
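For readers skimming the diffs, the dispatch produced by the previous patch's reorganization reduces to three cases; once gfx_v9_4/gfx_v9_4_2 stop installing their own handler, their injections fall through to the default path at the bottom. A condensed sketch of that flow, for readability only (not the verbatim kernel code):

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			/* GFX handlers consume the generic ras_inject_if directly */
			ret = block_obj->hw_ops->ras_error_inject(adev, info,
						info->instance_mask);
		else
			/* blocks with a special handler (e.g. XGMI) take the TA input */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
						info->instance_mask);
	} else {
		/* default path: hand the request to the PSP RAS TA */
		ret = psp_ras_trigger_error(&adev->psp, &block_info,
					    info->instance_mask);
	}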
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 24 ------------------------ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 24 ------------------------ 2 files changed, 48 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index 59abe162bbaf..bc8416afb62c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -970,29 +970,6 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); } -static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, - void *inject_if, uint32_t instance_mask) -{ - struct ras_inject_if *info = (struct ras_inject_if *)inject_if; - int ret; - struct ta_ras_trigger_error_input block_info = { 0 }; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; - - block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); - block_info.sub_block_index = info->head.sub_block_index; - block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); - block_info.address = info->address; - block_info.value = info->value; - - mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); - mutex_unlock(&adev->grbm_idx_mutex); - - return ret; -} - static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs = { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 }; @@ -1030,7 +1007,6 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev) const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = { - .ras_error_inject = &gfx_v9_4_ras_error_inject, .query_ras_error_count = &gfx_v9_4_query_ras_error_count, .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, .query_ras_error_status = &gfx_v9_4_query_ras_error_status, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 4906affa6f8c..2cc3a7cb1f54 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1699,29 +1699,6 @@ static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev) gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL); } -static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, - void *inject_if, uint32_t instance_mask) -{ - struct ras_inject_if *info = (struct ras_inject_if *)inject_if; - int ret; - struct ta_ras_trigger_error_input block_info = { 0 }; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; - - block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); - block_info.sub_block_index = info->head.sub_block_index; - block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); - block_info.address = info->address; - block_info.value = info->value; - - mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); - mutex_unlock(&adev->grbm_idx_mutex); - - return ret; -} - static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev) { uint32_t i, j; @@ -1945,7 +1922,6 @@ static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev) } struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = { - .ras_error_inject = &gfx_v9_4_2_ras_error_inject, .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, From 
f464c5dd4d918d4dd84eda7e68d4a0b6d41fe37f Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 20 Mar 2023 18:21:14 +0800 Subject: [PATCH 561/861] drm/amdgpu: add check for RAS instance mask The mask only needs to be set when the RAS block instance number is more than 1, and invalid bits should also be masked out. We only check valid bits for the GFX and SDMA blocks for now, and will add checks for other RAS blocks in the future. v2: move the check under injection operation since the mask is only used by RAS error inject. v3: add valid bits handling for SDMA. v4: print message if the mask is adjusted. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 38 +++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index b7d8250a9281..6bb438642cc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -333,6 +333,42 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return 0; } +static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev, + struct ras_debug_if *data) +{ + int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; + uint32_t mask, inst_mask = data->inject.instance_mask; + + /* no need to set instance mask if there is only one instance */ + if (num_xcc <= 1 && inst_mask) { + data->inject.instance_mask = 0; + dev_dbg(adev->dev, + "RAS inject mask(0x%x) isn't supported and force it to 0.\n", + inst_mask); + + return; + } + + switch (data->head.block) { + case AMDGPU_RAS_BLOCK__GFX: + mask = GENMASK(num_xcc - 1, 0); + break; + case AMDGPU_RAS_BLOCK__SDMA: + mask = GENMASK(adev->sdma.num_instances - 1, 0); + break; + default: + mask = 0; + break; + } + + /* remove invalid bits in instance mask */ + data->inject.instance_mask &= mask; + if (inst_mask != data->inject.instance_mask) + dev_dbg(adev->dev, + "Adjust RAS inject mask 0x%x to 0x%x\n", + inst_mask, data->inject.instance_mask); +} + /** * DOC: AMDGPU RAS debugfs control interface * @@ -468,6 +504,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, break; } + amdgpu_ras_instance_mask_check(adev, &data); + /* data.inject.address is offset instead of absolute gpu address */ ret = amdgpu_ras_error_inject(adev, &data.inject); break; From 0409022c15623d91c112e51f38cb62633becd432 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 11 May 2023 17:01:03 +0800 Subject: [PATCH 562/861] drm/amdkfd: Fix null ptr access Avoid accessing a null xcp_mgr pointer. Signed-off-by: Hawking Zhang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index bf32e547182c..2cfef3f9456f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -644,12 +644,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * If the VMID range changes for GFX9.4.3, then this code MUST be * revisited. 
*/ - partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr, AMDGPU_XCP_FL_LOCKED); - if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - partition_mode == AMDGPU_CPX_PARTITION_MODE && - kfd->num_nodes != 1) { - vmid_num_kfd /= 2; - first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; + if (kfd->adev->xcp_mgr) { + partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr, + AMDGPU_XCP_FL_LOCKED); + if (partition_mode == AMDGPU_CPX_PARTITION_MODE && + kfd->num_nodes != 1) { + vmid_num_kfd /= 2; + first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; + } } /* Verify module parameters regarding mapped process number*/ From 9a3ce1a7a9e5372d8c275bf3fbef4456c8407145 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 12 May 2023 13:22:57 +0800 Subject: [PATCH 563/861] drm/amdgpu: Do not access members of xcp w/o check (v2) Not all ASICs need xcp; ensure xcp availability is checked before accessing its members. v2: add missing change in kfd_topology.c Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 85df73f2c85e..739eb7c0d133 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -429,17 +429,17 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info, - uint8_t xcp_id) + struct amdgpu_xcp *xcp) { memset(mem_info, 0, sizeof(*mem_info)); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (xcp) { if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size) mem_info->local_mem_size_public = - KFD_XCP_MEMORY_SIZE(adev, xcp_id); + KFD_XCP_MEMORY_SIZE(adev, xcp->id); else mem_info->local_mem_size_private = - KFD_XCP_MEMORY_SIZE(adev, xcp_id); + KFD_XCP_MEMORY_SIZE(adev, xcp->id); } else { mem_info->local_mem_size_public = adev->gmc.visible_vram_size; mem_info->local_mem_size_private = adev->gmc.real_vram_size - diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 48d12dbff968..be43d71ba7ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -233,7 +233,7 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum kgd_engine_type type); void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info, - uint8_t xcp_id); + struct amdgpu_xcp *xcp); uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev); uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 2cfef3f9456f..986543a000bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -784,7 +784,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, atomic_set(&node->sram_ecc_flag, 0); amdgpu_amdkfd_get_local_mem_info(kfd->adev, - &node->local_mem_info, node->xcp->id); + &node->local_mem_info, node->xcp); /* Initialize the KFD node */ if (kfd_init_node(node)) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e0bacf017a40..8302d8967158 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1235,7 +1235,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * all the banks will report the same mem_clk_max information */ amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info, - dev->gpu->xcp->id); + dev->gpu->xcp); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; From d78c71321ea963378cd3c5646ac6c6483d8d1a50 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 2 Feb 2023 18:57:04 +0800 Subject: [PATCH 564/861] drm/amdgpu: add GFX RAS common function The common function can help reduce redundant code. v2: remove xcp operation, only need to do RAS operations for all instances. v3: remove check for GFX RAS support, will be checked in higher level. add amdgpu prefix for the function name. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 19 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 ++++ 2 files changed, 23 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 6098b8b1ae5b..8883d5ee13cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -884,6 +884,25 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, return 0; } +void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, + void *ras_error_status, + void (*func)(struct amdgpu_device *adev, void *ras_error_status, + int xcc_id)) +{ + int i; + int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; + uint32_t xcc_mask = GENMASK(num_xcc - 1, 0); + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + if (err_data) { + err_data->ue_count = 0; + err_data->ce_count = 0; + } + + for_each_inst(i, xcc_mask) + func(adev, ras_error_status, i); +} + uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) { signed long r, cnt = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 3d11b7a0bd75..0df53fe7b199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -494,6 +494,10 @@ int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev); +void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, + void *ras_error_status, + void (*func)(struct amdgpu_device *adev, void *ras_error_status, + int xcc_id)); static inline const char *amdgpu_gfx_compute_mode_desc(int mode) { From bf16235b39d4ca9c8dd47ec1b2faded6ea58f7a2 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 2 Feb 2023 17:20:23 +0800 Subject: [PATCH 565/861] drm/amdgpu: add RAS status query for gfx_v9_4_3 Query GFX RAS status. v2: remove xcp operation. v3: change instance from 0 to xcc_id for register access. 
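With the amdgpu_gfx_ras_error_func() helper from the previous patch, a block driver only has to supply an xcc-indexed callback and the helper fans it out across instances. A minimal usage sketch; my_query_inst()/my_query_all() are hypothetical names for illustration, and the real gfx_v9_4_3 callbacks follow in the diff below:

	static void my_query_inst(struct amdgpu_device *adev,
				  void *ras_error_status, int xcc_id)
	{
		/* read/clear one instance's error status, indexing the
		 * registers with GET_INST(GC, xcc_id) as the diffs below do
		 */
	}

	static void my_query_all(struct amdgpu_device *adev, void *ras_error_status)
	{
		/* the helper zeroes err_data->ue_count/ce_count, then invokes
		 * the callback once per instance, iterating each of the
		 * NUM_XCC(adev->gfx.xcc_mask) XCCs
		 */
		amdgpu_gfx_ras_error_func(adev, ras_error_status, my_query_inst);
	}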
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 75 +++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 6cde05421a10..f178e3f565e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2980,6 +2980,81 @@ static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable) } } +static const struct soc15_reg_entry gfx_v9_4_3_ea_err_status_regs = { + SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 +}; + +static void gfx_v9_4_3_inst_query_ea_err_status(struct amdgpu_device *adev, + int xcc_id) +{ + uint32_t i, j; + uint32_t reg_value; + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) { + for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) { + gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id); + reg_value = RREG32_SOC15(GC, GET_INST(GC, xcc_id), + regGCEA_ERR_STATUS); + if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) || + REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) || + REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { + dev_warn(adev->dev, + "GCEA err detected at instance: %d, status: 0x%x!\n", + j, reg_value); + } + /* clear after read */ + reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS, + CLEAR_ERROR_STATUS, 0x1); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS, + reg_value); + } + } + + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); +} + +static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev, + int xcc_id) +{ + uint32_t data; + + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS); + if (data) { + dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3); + } + + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS); + if (data) { + dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3); + } + + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), + regVML2_WALKER_MEM_ECC_STATUS); + if (data) { + dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, + 0x3); + } +} + +static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev, + void *ras_error_status, int xcc_id) +{ + gfx_v9_4_3_inst_query_ea_err_status(adev, xcc_id); + gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id); +} + +static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev) +{ + amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status); +} + static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { .name = "gfx_v9_4_3", .early_init = gfx_v9_4_3_early_init, From 47e7f527c8256a2fe3e61fcd5f59c18bc3fb53fc Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Fri, 3 Feb 2023 10:41:26 +0800 Subject: [PATCH 566/861] drm/amdgpu: add RAS status reset for gfx_v9_4_3 Reset GFX RAS status registers. v2: fix typo in title. remove xcp operation. v3: change instance from 0 to xcc_id for register access. 
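Taken together, the query handler above and the reset handler added below are the two entry points a RAS hw_ops table expects. Following the gfx_v9_4_2_ras_ops pattern shown earlier in the series, the eventual hookup would look roughly like this; a sketch only, since the actual gfx v9.4.3 ras_ops wiring is not part of this excerpt:

	struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
		/* both entry points fan out per XCC via amdgpu_gfx_ras_error_func() */
		.query_ras_error_status = &gfx_v9_4_3_query_ras_error_status,
		.reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status,
	};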
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 41 +++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index f178e3f565e9..e6069d081f71 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -3050,11 +3050,52 @@ static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev, gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id); } +static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev, + int xcc_id) +{ + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3); +} + +static void gfx_v9_4_3_inst_reset_ea_err_status(struct amdgpu_device *adev, + int xcc_id) +{ + uint32_t i, j; + uint32_t value; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) { + for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) { + gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id); + value = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS); + value = REG_SET_FIELD(value, GCEA_ERR_STATUS, + CLEAR_ERROR_STATUS, 0x1); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS, value); + } + } + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); +} + +static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev, + void *ras_error_status, int xcc_id) +{ + gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id); + gfx_v9_4_3_inst_reset_ea_err_status(adev, xcc_id); +} + static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev) { amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status); } +static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev) +{ + amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status); +} + static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { .name = "gfx_v9_4_3", .early_init = gfx_v9_4_3_early_init, From 2b80ffc2d86cce8444d5fc0237afd77f7d18cd1d Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 27 Feb 2023 17:36:19 +0800 Subject: [PATCH 567/861] drm/amdgpu: Add gc v9_4_3 ras error status registers GC v9_4_3 introduces UE|CE_ERR_STATUS_LO|HI to log hardware errors Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- .../amd/include/asic_reg/gc/gc_9_4_3_offset.h | 192 +++ .../include/asic_reg/gc/gc_9_4_3_sh_mask.h | 1112 +++++++++++++++++ 2 files changed, 1304 insertions(+) diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h index 3100de8b3881..393963502b7a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h @@ -705,6 +705,46 @@ #define regSQC_ICACHE_UTCL1_STATUS_BASE_IDX 0 #define regSQC_DCACHE_UTCL1_STATUS 0x03d8 #define regSQC_DCACHE_UTCL1_STATUS_BASE_IDX 0 +#define regSQC_UE_EDC_LO 0x03d9 +#define regSQC_UE_EDC_LO_BASE_IDX 0 +#define regSQC_UE_EDC_HI 0x03da +#define regSQC_UE_EDC_HI_BASE_IDX 0 +#define regSQC_CE_EDC_LO 0x03db +#define regSQC_CE_EDC_LO_BASE_IDX 0 +#define regSQC_CE_EDC_HI 0x03dc +#define regSQC_CE_EDC_HI_BASE_IDX 0 +#define regSQ_UE_ERR_STATUS_LO 0x03dd +#define 
regSQ_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regSQ_UE_ERR_STATUS_HI 0x03de +#define regSQ_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regSQ_CE_ERR_STATUS_LO 0x03df +#define regSQ_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regSQ_CE_ERR_STATUS_HI 0x03e0 +#define regSQ_CE_ERR_STATUS_HI_BASE_IDX 0 +#define regLDS_UE_ERR_STATUS_LO 0x03e1 +#define regLDS_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regLDS_UE_ERR_STATUS_HI 0x03e2 +#define regLDS_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regLDS_CE_ERR_STATUS_LO 0x03e3 +#define regLDS_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regLDS_CE_ERR_STATUS_HI 0x03e4 +#define regLDS_CE_ERR_STATUS_HI_BASE_IDX 0 +#define regSP0_UE_ERR_STATUS_LO 0x03e5 +#define regSP0_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regSP0_UE_ERR_STATUS_HI 0x03e6 +#define regSP0_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regSP0_CE_ERR_STATUS_LO 0x03e7 +#define regSP0_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regSP0_CE_ERR_STATUS_HI 0x03e8 +#define regSP0_CE_ERR_STATUS_HI_BASE_IDX 0 +#define regSP1_UE_ERR_STATUS_LO 0x03e9 +#define regSP1_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regSP1_UE_ERR_STATUS_HI 0x03ea +#define regSP1_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regSP1_CE_ERR_STATUS_LO 0x03eb +#define regSP1_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regSP1_CE_ERR_STATUS_HI 0x03ec +#define regSP1_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: xcd0_gc_shsdec @@ -727,6 +767,14 @@ #define regSPI_DSM_CNTL2_BASE_IDX 0 #define regSPI_EDC_CNT 0x0445 #define regSPI_EDC_CNT_BASE_IDX 0 +#define regSPI_UE_ERR_STATUS_LO 0x0446 +#define regSPI_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regSPI_UE_ERR_STATUS_HI 0x0447 +#define regSPI_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regSPI_CE_ERR_STATUS_LO 0x0448 +#define regSPI_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regSPI_CE_ERR_STATUS_HI 0x0449 +#define regSPI_CE_ERR_STATUS_HI_BASE_IDX 0 #define regSPI_DEBUG_BUSY 0x0450 #define regSPI_DEBUG_BUSY_BASE_IDX 0 #define regSPI_CONFIG_PS_CU_EN 0x0452 @@ -871,6 +919,14 @@ #define regTD_STATUS_BASE_IDX 0 #define regTD_POWER_CNTL 0x052a #define regTD_POWER_CNTL_BASE_IDX 0 +#define regTD_UE_EDC_LO 0x052b +#define regTD_UE_EDC_LO_BASE_IDX 0 +#define regTD_UE_EDC_HI 0x052c +#define regTD_UE_EDC_HI_BASE_IDX 0 +#define regTD_CE_EDC_LO 0x052d +#define regTD_CE_EDC_LO_BASE_IDX 0 +#define regTD_CE_EDC_HI 0x052e +#define regTD_CE_EDC_HI_BASE_IDX 0 #define regTD_DSM_CNTL 0x052f #define regTD_DSM_CNTL_BASE_IDX 0 #define regTD_DSM_CNTL2 0x0530 @@ -893,6 +949,14 @@ #define regTA_DSM_CNTL_BASE_IDX 0 #define regTA_DSM_CNTL2 0x0585 #define regTA_DSM_CNTL2_BASE_IDX 0 +#define regTA_UE_EDC_LO 0x0587 +#define regTA_UE_EDC_LO_BASE_IDX 0 +#define regTA_UE_EDC_HI 0x0588 +#define regTA_UE_EDC_HI_BASE_IDX 0 +#define regTA_CE_EDC_LO 0x0589 +#define regTA_CE_EDC_LO_BASE_IDX 0 +#define regTA_CE_EDC_HI 0x058a +#define regTA_CE_EDC_HI_BASE_IDX 0 // addressBlock: xcd0_gc_gdsdec @@ -923,6 +987,14 @@ #define regGDS_DSM_CNTL2_BASE_IDX 0 #define regGDS_WD_GDS_CSB 0x05ce #define regGDS_WD_GDS_CSB_BASE_IDX 0 +#define regGDS_UE_ERR_STATUS_LO 0x05cf +#define regGDS_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regGDS_UE_ERR_STATUS_HI 0x05d0 +#define regGDS_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regGDS_CE_ERR_STATUS_LO 0x05d1 +#define regGDS_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regGDS_CE_ERR_STATUS_HI 0x05d2 +#define regGDS_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: xcd0_gc_rbdec @@ -1243,6 +1315,10 @@ #define regGCEA_MAM_CTRL_BASE_IDX 0 #define regGCEA_MAM_CTRL2 0x0702 #define regGCEA_MAM_CTRL2_BASE_IDX 0 +#define regGCEA_UE_ERR_STATUS_LO 0x0706 +#define regGCEA_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regGCEA_UE_ERR_STATUS_HI 0x0707 
+#define regGCEA_UE_ERR_STATUS_HI_BASE_IDX 0 #define regGCEA_DSM_CNTL 0x0708 #define regGCEA_DSM_CNTL_BASE_IDX 0 #define regGCEA_DSM_CNTLA 0x0709 @@ -1277,6 +1353,10 @@ #define regGCEA_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 0 #define regGCEA_SDP_BACKDOOR_MISCCREDITS 0x0719 #define regGCEA_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 0 +#define regGCEA_CE_ERR_STATUS_LO 0x071b +#define regGCEA_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regGCEA_CE_ERR_STATUS_HI 0x071d +#define regGCEA_CE_ERR_STATUS_HI_BASE_IDX 0 #define regGCEA_SDP_ENABLE 0x071f #define regGCEA_SDP_ENABLE_BASE_IDX 0 @@ -1389,6 +1469,14 @@ #define regATC_L2_CNTL4_BASE_IDX 0 #define regATC_L2_MM_GROUP_RT_CLASSES 0x0816 #define regATC_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0 +#define regATC_L2_UE_ERR_STATUS_LO 0x081a +#define regATC_L2_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regATC_L2_UE_ERR_STATUS_HI 0x081b +#define regATC_L2_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regATC_L2_CE_ERR_STATUS_LO 0x081c +#define regATC_L2_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regATC_L2_CE_ERR_STATUS_HI 0x081d +#define regATC_L2_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: xcd0_gc_utcl2_vml2pfdec @@ -1475,6 +1563,30 @@ #define regUTCL2_EDC_MODE_BASE_IDX 0 #define regUTCL2_EDC_CONFIG 0x084c #define regUTCL2_EDC_CONFIG_BASE_IDX 0 +#define regVML2_UE_ERR_STATUS_LO 0x084d +#define regVML2_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regVML2_WALKER_UE_ERR_STATUS_LO 0x084e +#define regVML2_WALKER_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regUTCL2_UE_ERR_STATUS_LO 0x084f +#define regUTCL2_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regVML2_UE_ERR_STATUS_HI 0x0850 +#define regVML2_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regVML2_WALKER_UE_ERR_STATUS_HI 0x0851 +#define regVML2_WALKER_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regUTCL2_UE_ERR_STATUS_HI 0x0852 +#define regUTCL2_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regVML2_CE_ERR_STATUS_LO 0x0853 +#define regVML2_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regVML2_WALKER_CE_ERR_STATUS_LO 0x0854 +#define regVML2_WALKER_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regUTCL2_CE_ERR_STATUS_LO 0x0855 +#define regUTCL2_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regVML2_CE_ERR_STATUS_HI 0x0856 +#define regVML2_CE_ERR_STATUS_HI_BASE_IDX 0 +#define regVML2_WALKER_CE_ERR_STATUS_HI 0x0857 +#define regVML2_WALKER_CE_ERR_STATUS_HI_BASE_IDX 0 +#define regUTCL2_CE_ERR_STATUS_HI 0x0858 +#define regUTCL2_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: xcd0_gc_utcl2_vml2vcdec @@ -2011,6 +2123,22 @@ #define regTC_CFG_L1_VOLATILE_BASE_IDX 0 #define regTC_CFG_L2_VOLATILE 0x0b23 #define regTC_CFG_L2_VOLATILE_BASE_IDX 0 +#define regTCP_UE_EDC_HI_REG 0x0b54 +#define regTCP_UE_EDC_HI_REG_BASE_IDX 0 +#define regTCP_UE_EDC_LO_REG 0x0b55 +#define regTCP_UE_EDC_LO_REG_BASE_IDX 0 +#define regTCP_CE_EDC_HI_REG 0x0b56 +#define regTCP_CE_EDC_HI_REG_BASE_IDX 0 +#define regTCP_CE_EDC_LO_REG 0x0b57 +#define regTCP_CE_EDC_LO_REG_BASE_IDX 0 +#define regTCI_UE_EDC_HI_REG 0x0b58 +#define regTCI_UE_EDC_HI_REG_BASE_IDX 0 +#define regTCI_UE_EDC_LO_REG 0x0b59 +#define regTCI_UE_EDC_LO_REG_BASE_IDX 0 +#define regTCI_CE_EDC_HI_REG 0x0b5a +#define regTCI_CE_EDC_HI_REG_BASE_IDX 0 +#define regTCI_CE_EDC_LO_REG 0x0b5b +#define regTCI_CE_EDC_LO_REG_BASE_IDX 0 #define regTCI_MISC 0x0b5c #define regTCI_MISC_BASE_IDX 0 #define regTCI_CNTL_3 0x0b5d @@ -2061,6 +2189,26 @@ #define regTCX_DSM_CNTL_BASE_IDX 0 #define regTCX_DSM_CNTL2 0x0bc8 #define regTCX_DSM_CNTL2_BASE_IDX 0 +#define regTCA_UE_ERR_STATUS_LO 0x0bc9 +#define regTCA_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regTCA_UE_ERR_STATUS_HI 0x0bca +#define regTCA_UE_ERR_STATUS_HI_BASE_IDX 0 +#define 
regTCX_UE_ERR_STATUS_LO 0x0bcb +#define regTCX_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regTCX_UE_ERR_STATUS_HI 0x0bcc +#define regTCX_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regTCX_CE_ERR_STATUS_LO 0x0bcd +#define regTCX_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regTCX_CE_ERR_STATUS_HI 0x0bce +#define regTCX_CE_ERR_STATUS_HI_BASE_IDX 0 +#define regTCC_UE_ERR_STATUS_LO 0x0bcf +#define regTCC_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regTCC_UE_ERR_STATUS_HI 0x0bd0 +#define regTCC_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regTCC_CE_ERR_STATUS_LO 0x0bd1 +#define regTCC_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regTCC_CE_ERR_STATUS_HI 0x0bd2 +#define regTCC_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: xcd0_gc_shdec @@ -2905,6 +3053,30 @@ #define regCP_MEC2_F32_INT_DIS_BASE_IDX 0 #define regCP_VMID_STATUS 0x10bf #define regCP_VMID_STATUS_BASE_IDX 0 +#define regCPC_UE_ERR_STATUS_LO 0x10e0 +#define regCPC_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regCPC_UE_ERR_STATUS_HI 0x10e1 +#define regCPC_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regCPC_CE_ERR_STATUS_LO 0x10e2 +#define regCPC_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regCPC_CE_ERR_STATUS_HI 0x10e3 +#define regCPC_CE_ERR_STATUS_HI_BASE_IDX 0 +#define regCPF_UE_ERR_STATUS_LO 0x10e4 +#define regCPF_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regCPF_UE_ERR_STATUS_HI 0x10e5 +#define regCPF_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regCPF_CE_ERR_STATUS_LO 0x10e6 +#define regCPF_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regCPF_CE_ERR_STATUS_HI 0x10e7 +#define regCPF_CE_ERR_STATUS_HI_BASE_IDX 0 +#define regCPG_UE_ERR_STATUS_LO 0x10e8 +#define regCPG_UE_ERR_STATUS_LO_BASE_IDX 0 +#define regCPG_UE_ERR_STATUS_HI 0x10e9 +#define regCPG_UE_ERR_STATUS_HI_BASE_IDX 0 +#define regCPG_CE_ERR_STATUS_LO 0x10ea +#define regCPG_CE_ERR_STATUS_LO_BASE_IDX 0 +#define regCPG_CE_ERR_STATUS_HI 0x10eb +#define regCPG_CE_ERR_STATUS_HI_BASE_IDX 0 // addressBlock: xcd0_gc_cppdec2 @@ -5364,6 +5536,18 @@ #define regSPI_WAVE_LIMIT_CNTL 0x2443 #define regSPI_WAVE_LIMIT_CNTL_BASE_IDX 1 +// addressBlock: xcd0_gc_gccanedec +// base address: 0x33d00 +#define regGC_CANE_ERR_STATUS 0x2f4d +#define regGC_CANE_ERR_STATUS_BASE_IDX 1 +#define regGC_CANE_UE_ERR_STATUS_LO 0x2f4e +#define regGC_CANE_UE_ERR_STATUS_LO_BASE_IDX 1 +#define regGC_CANE_UE_ERR_STATUS_HI 0x2f4f +#define regGC_CANE_UE_ERR_STATUS_HI_BASE_IDX 1 +#define regGC_CANE_CE_ERR_STATUS_LO 0x2f50 +#define regGC_CANE_CE_ERR_STATUS_LO_BASE_IDX 1 +#define regGC_CANE_CE_ERR_STATUS_HI 0x2f51 +#define regGC_CANE_CE_ERR_STATUS_HI_BASE_IDX 1 // addressBlock: xcd0_gc_perfddec // base address: 0x34000 @@ -6583,6 +6767,10 @@ #define regRLC_CAPTURE_GPU_CLOCK_COUNT_2_BASE_IDX 1 #define regRLC_CPG_STAT_INVAL 0x4d09 #define regRLC_CPG_STAT_INVAL_BASE_IDX 1 +#define regRLC_UE_ERR_STATUS_LOW 0x4d40 +#define regRLC_UE_ERR_STATUS_LOW_BASE_IDX 1 +#define regRLC_UE_ERR_STATUS_HIGH 0x4d41 +#define regRLC_UE_ERR_STATUS_HIGH_BASE_IDX 1 #define regRLC_DSM_CNTL 0x4d42 #define regRLC_DSM_CNTL_BASE_IDX 1 #define regRLC_DSM_CNTLA 0x4d43 @@ -6591,6 +6779,10 @@ #define regRLC_DSM_CNTL2_BASE_IDX 1 #define regRLC_DSM_CNTL2A 0x4d45 #define regRLC_DSM_CNTL2A_BASE_IDX 1 +#define regRLC_CE_ERR_STATUS_LOW 0x4d49 +#define regRLC_CE_ERR_STATUS_LOW_BASE_IDX 1 +#define regRLC_CE_ERR_STATUS_HIGH 0x4d4a +#define regRLC_CE_ERR_STATUS_HIGH_BASE_IDX 1 #define regRLC_RLCV_SPARE_INT 0x4f30 #define regRLC_RLCV_SPARE_INT_BASE_IDX 1 #define regRLC_SMU_CLK_REQ 0x4f97 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h index 84a75b58347f..2bd9f3f1026f 100644 
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h @@ -4129,6 +4129,240 @@ #define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L #define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L #define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L +//SQC_UE_EDC_LO +#define SQC_UE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define SQC_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define SQC_UE_EDC_LO__ADDRESS__SHIFT 0x2 +#define SQC_UE_EDC_LO__MEM_ID__SHIFT 0x18 +#define SQC_UE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define SQC_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define SQC_UE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL +#define SQC_UE_EDC_LO__MEM_ID_MASK 0xFF000000L +//SQC_UE_EDC_HI +#define SQC_UE_EDC_HI__ECC__SHIFT 0x0 +#define SQC_UE_EDC_HI__PARITY__SHIFT 0x1 +#define SQC_UE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SQC_UE_EDC_HI__ERR_INFO__SHIFT 0x3 +#define SQC_UE_EDC_HI__UE_CNT__SHIFT 0x17 +#define SQC_UE_EDC_HI__FED_CNT__SHIFT 0x1a +#define SQC_UE_EDC_HI__ECC_MASK 0x00000001L +#define SQC_UE_EDC_HI__PARITY_MASK 0x00000002L +#define SQC_UE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SQC_UE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L +#define SQC_UE_EDC_HI__UE_CNT_MASK 0x03800000L +#define SQC_UE_EDC_HI__FED_CNT_MASK 0x1C000000L +//SQC_CE_EDC_LO +#define SQC_CE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define SQC_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define SQC_CE_EDC_LO__ADDRESS__SHIFT 0x2 +#define SQC_CE_EDC_LO__MEM_ID__SHIFT 0x18 +#define SQC_CE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define SQC_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define SQC_CE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL +#define SQC_CE_EDC_LO__MEM_ID_MASK 0xFF000000L +//SQC_CE_EDC_HI +#define SQC_CE_EDC_HI__ECC__SHIFT 0x0 +#define SQC_CE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SQC_CE_EDC_HI__ERR_INFO__SHIFT 0x3 +#define SQC_CE_EDC_HI__CE_CNT__SHIFT 0x17 +#define SQC_CE_EDC_HI__POSION__SHIFT 0x1a +#define SQC_CE_EDC_HI__ECC_MASK 0x00000001L +#define SQC_CE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SQC_CE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L +#define SQC_CE_EDC_HI__CE_CNT_MASK 0x03800000L +#define SQC_CE_EDC_HI__POSION_MASK 0x04000000L +//SQ_UE_ERR_STATUS_LO +#define SQ_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SQ_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define SQ_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SQ_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SQ_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SQ_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SQ_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SQ_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SQ_UE_ERR_STATUS_HI +#define SQ_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SQ_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define SQ_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SQ_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SQ_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define SQ_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define SQ_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define SQ_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SQ_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define SQ_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SQ_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SQ_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define SQ_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define SQ_UE_ERR_STATUS_HI__RESERVED_MASK 
0xE0000000L +//SQ_CE_ERR_STATUS_LO +#define SQ_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SQ_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define SQ_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SQ_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SQ_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SQ_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SQ_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SQ_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SQ_CE_ERR_STATUS_HI +#define SQ_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SQ_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define SQ_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SQ_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SQ_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define SQ_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define SQ_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define SQ_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SQ_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define SQ_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SQ_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SQ_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define SQ_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define SQ_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L +//LDS_UE_ERR_STATUS_LO +#define LDS_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define LDS_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define LDS_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define LDS_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define LDS_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define LDS_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define LDS_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define LDS_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//LDS_UE_ERR_STATUS_HI +#define LDS_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define LDS_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define LDS_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define LDS_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define LDS_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define LDS_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define LDS_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define LDS_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define LDS_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define LDS_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define LDS_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define LDS_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define LDS_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define LDS_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//LDS_CE_ERR_STATUS_LO +#define LDS_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define LDS_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define LDS_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define LDS_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define LDS_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define LDS_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define LDS_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define LDS_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//LDS_CE_ERR_STATUS_HI +#define LDS_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define LDS_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define LDS_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define LDS_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define LDS_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define LDS_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define LDS_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define LDS_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define LDS_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L 
+#define LDS_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define LDS_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define LDS_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define LDS_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define LDS_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L +//SP0_UE_ERR_STATUS_LO +#define SP0_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SP0_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define SP0_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SP0_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SP0_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SP0_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SP0_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SP0_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SP0_UE_ERR_STATUS_HI +#define SP0_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SP0_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define SP0_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SP0_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SP0_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define SP0_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define SP0_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define SP0_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SP0_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define SP0_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SP0_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SP0_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define SP0_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define SP0_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//SP0_CE_ERR_STATUS_LO +#define SP0_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SP0_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define SP0_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SP0_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SP0_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SP0_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SP0_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SP0_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SP0_CE_ERR_STATUS_HI +#define SP0_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SP0_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define SP0_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SP0_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SP0_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define SP0_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define SP0_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define SP0_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SP0_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define SP0_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SP0_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SP0_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define SP0_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define SP0_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L +//SP1_UE_ERR_STATUS_LO +#define SP1_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SP1_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define SP1_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SP1_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SP1_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SP1_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SP1_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SP1_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SP1_UE_ERR_STATUS_HI +#define SP1_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SP1_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define SP1_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 
+#define SP1_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SP1_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define SP1_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define SP1_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define SP1_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SP1_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define SP1_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SP1_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SP1_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define SP1_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define SP1_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//SP1_CE_ERR_STATUS_LO +#define SP1_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SP1_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define SP1_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SP1_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SP1_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SP1_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SP1_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SP1_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SP1_CE_ERR_STATUS_HI +#define SP1_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SP1_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define SP1_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SP1_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SP1_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define SP1_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define SP1_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define SP1_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SP1_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define SP1_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SP1_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SP1_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define SP1_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define SP1_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L // addressBlock: xcd0_gc_shsdec @@ -4235,6 +4469,54 @@ #define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L #define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L #define SPI_EDC_CNT__UNUSED_MASK 0xFFF00000L +//SPI_UE_ERR_STATUS_LO +#define SPI_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SPI_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define SPI_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SPI_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SPI_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SPI_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SPI_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SPI_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SPI_UE_ERR_STATUS_HI +#define SPI_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SPI_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define SPI_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SPI_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SPI_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define SPI_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define SPI_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define SPI_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SPI_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define SPI_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SPI_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SPI_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define SPI_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define SPI_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//SPI_CE_ERR_STATUS_LO +#define SPI_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define SPI_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 
+#define SPI_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define SPI_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define SPI_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define SPI_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define SPI_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define SPI_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//SPI_CE_ERR_STATUS_HI +#define SPI_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define SPI_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define SPI_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define SPI_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define SPI_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define SPI_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define SPI_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define SPI_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define SPI_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define SPI_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define SPI_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define SPI_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define SPI_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define SPI_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L //SPI_DEBUG_BUSY #define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x0 #define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x1 @@ -4622,6 +4904,48 @@ #define TD_POWER_CNTL__MGCG_OUTPUTSTAGE_MASK 0x00000002L #define TD_POWER_CNTL__MID0_THREAD_DATA_MASK 0x00000004L #define TD_POWER_CNTL__MID2_ACCUM_DATA_MASK 0x00000008L +//TD_UE_EDC_LO +#define TD_UE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define TD_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define TD_UE_EDC_LO__ADDRESS__SHIFT 0x2 +#define TD_UE_EDC_LO__MEM_ID__SHIFT 0x18 +#define TD_UE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define TD_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define TD_UE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL +#define TD_UE_EDC_LO__MEM_ID_MASK 0xFF000000L +//TD_UE_EDC_HI +#define TD_UE_EDC_HI__ECC__SHIFT 0x0 +#define TD_UE_EDC_HI__PARITY__SHIFT 0x1 +#define TD_UE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define TD_UE_EDC_HI__ERR_INFO__SHIFT 0x3 +#define TD_UE_EDC_HI__UE_CNT__SHIFT 0x17 +#define TD_UE_EDC_HI__FED_CNT__SHIFT 0x1a +#define TD_UE_EDC_HI__ECC_MASK 0x00000001L +#define TD_UE_EDC_HI__PARITY_MASK 0x00000002L +#define TD_UE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define TD_UE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L +#define TD_UE_EDC_HI__UE_CNT_MASK 0x03800000L +#define TD_UE_EDC_HI__FED_CNT_MASK 0x1C000000L +//TD_CE_EDC_LO +#define TD_CE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define TD_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define TD_CE_EDC_LO__ADDRESS__SHIFT 0x2 +#define TD_CE_EDC_LO__MEM_ID__SHIFT 0x18 +#define TD_CE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define TD_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define TD_CE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL +#define TD_CE_EDC_LO__MEM_ID_MASK 0xFF000000L +//TD_CE_EDC_HI +#define TD_CE_EDC_HI__ECC__SHIFT 0x0 +#define TD_CE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define TD_CE_EDC_HI__ERR_INFO__SHIFT 0x3 +#define TD_CE_EDC_HI__CE_CNT__SHIFT 0x17 +#define TD_CE_EDC_HI__POISON__SHIFT 0x1a +#define TD_CE_EDC_HI__ECC_MASK 0x00000001L +#define TD_CE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define TD_CE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L +#define TD_CE_EDC_HI__CE_CNT_MASK 0x03800000L +#define TD_CE_EDC_HI__POISON_MASK 0x04000000L //TD_DSM_CNTL #define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA__SHIFT 0x0 #define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -4771,6 +5095,48 @@ #define 
TA_DSM_CNTL2__TA_FS_AFIFO_HI_ENABLE_ERROR_INJECT_MASK 0x000C0000L #define TA_DSM_CNTL2__TA_FS_AFIFO_HI_SELECT_INJECT_DELAY_MASK 0x00100000L #define TA_DSM_CNTL2__TA_INJECT_DELAY_MASK 0xFC000000L +//TA_UE_EDC_LO +#define TA_UE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define TA_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define TA_UE_EDC_LO__ADDRESS__SHIFT 0x2 +#define TA_UE_EDC_LO__MEM_ID__SHIFT 0x18 +#define TA_UE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define TA_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define TA_UE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL +#define TA_UE_EDC_LO__MEM_ID_MASK 0xFF000000L +//TA_UE_EDC_HI +#define TA_UE_EDC_HI__ECC__SHIFT 0x0 +#define TA_UE_EDC_HI__PARITY__SHIFT 0x1 +#define TA_UE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define TA_UE_EDC_HI__ERR_INFO__SHIFT 0x3 +#define TA_UE_EDC_HI__UE_CNT__SHIFT 0x17 +#define TA_UE_EDC_HI__FED_CNT__SHIFT 0x1a +#define TA_UE_EDC_HI__ECC_MASK 0x00000001L +#define TA_UE_EDC_HI__PARITY_MASK 0x00000002L +#define TA_UE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define TA_UE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L +#define TA_UE_EDC_HI__UE_CNT_MASK 0x03800000L +#define TA_UE_EDC_HI__FED_CNT_MASK 0x1C000000L +//TA_CE_EDC_LO +#define TA_CE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define TA_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define TA_CE_EDC_LO__ADDRESS__SHIFT 0x2 +#define TA_CE_EDC_LO__MEM_ID__SHIFT 0x18 +#define TA_CE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define TA_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define TA_CE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL +#define TA_CE_EDC_LO__MEM_ID_MASK 0xFF000000L +//TA_CE_EDC_HI +#define TA_CE_EDC_HI__ECC__SHIFT 0x0 +#define TA_CE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define TA_CE_EDC_HI__ERR_INFO__SHIFT 0x3 +#define TA_CE_EDC_HI__CE_CNT__SHIFT 0x17 +#define TA_CE_EDC_HI__POISON__SHIFT 0x1a +#define TA_CE_EDC_HI__ECC_MASK 0x00000001L +#define TA_CE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define TA_CE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L +#define TA_CE_EDC_HI__CE_CNT_MASK 0x03800000L +#define TA_CE_EDC_HI__POISON_MASK 0x04000000L // addressBlock: xcd0_gc_gdsdec @@ -5015,6 +5381,54 @@ #define GDS_WD_GDS_CSB__UNUSED__SHIFT 0xd #define GDS_WD_GDS_CSB__COUNTER_MASK 0x00001FFFL #define GDS_WD_GDS_CSB__UNUSED_MASK 0xFFFFE000L +//GDS_UE_ERR_STATUS_LO +#define GDS_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define GDS_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define GDS_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define GDS_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define GDS_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define GDS_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define GDS_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define GDS_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//GDS_UE_ERR_STATUS_HI +#define GDS_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define GDS_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define GDS_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define GDS_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define GDS_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define GDS_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define GDS_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define GDS_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define GDS_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define GDS_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define GDS_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define GDS_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define GDS_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define 
GDS_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//GDS_CE_ERR_STATUS_LO +#define GDS_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define GDS_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define GDS_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define GDS_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define GDS_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define GDS_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define GDS_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define GDS_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//GDS_CE_ERR_STATUS_HI +#define GDS_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define GDS_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define GDS_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define GDS_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define GDS_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define GDS_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define GDS_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define GDS_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define GDS_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define GDS_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define GDS_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define GDS_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define GDS_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define GDS_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L // addressBlock: xcd0_gc_rbdec @@ -7370,6 +7784,30 @@ #define GCEA_MAM_CTRL2__ARAM_FLUSH_NOALLOC_MASK 0x00000040L #define GCEA_MAM_CTRL2__RESERVED_FIELD_MASK 0x00FFFF80L #define GCEA_MAM_CTRL2__ADDR_HI_MASK 0xFF000000L +//GCEA_UE_ERR_STATUS_LO +#define GCEA_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define GCEA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define GCEA_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define GCEA_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define GCEA_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define GCEA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define GCEA_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define GCEA_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//GCEA_UE_ERR_STATUS_HI +#define GCEA_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define GCEA_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define GCEA_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define GCEA_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define GCEA_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define GCEA_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define GCEA_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d +#define GCEA_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define GCEA_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define GCEA_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define GCEA_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define GCEA_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define GCEA_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define GCEA_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L //GCEA_DSM_CNTL #define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -7745,6 +8183,30 @@ #define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L #define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED_MASK 0x007F0000L #define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x3F800000L +//GCEA_CE_ERR_STATUS_LO +#define GCEA_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0 +#define GCEA_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define GCEA_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define GCEA_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define 
GCEA_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L +#define GCEA_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define GCEA_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define GCEA_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//GCEA_CE_ERR_STATUS_HI +#define GCEA_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define GCEA_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1 +#define GCEA_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define GCEA_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define GCEA_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define GCEA_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define GCEA_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b +#define GCEA_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define GCEA_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L +#define GCEA_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define GCEA_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define GCEA_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define GCEA_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define GCEA_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L //GCEA_SDP_ENABLE #define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0 #define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L @@ -8440,6 +8902,54 @@ //ATC_L2_MM_GROUP_RT_CLASSES #define ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS__SHIFT 0x0 #define ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS_MASK 0xFFFFFFFFL +//ATC_L2_UE_ERR_STATUS_LO +#define ATC_L2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define ATC_L2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define ATC_L2_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define ATC_L2_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define ATC_L2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define ATC_L2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define ATC_L2_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define ATC_L2_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//ATC_L2_UE_ERR_STATUS_HI +#define ATC_L2_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define ATC_L2_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define ATC_L2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define ATC_L2_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define ATC_L2_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define ATC_L2_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define ATC_L2_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define ATC_L2_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define ATC_L2_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define ATC_L2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define ATC_L2_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define ATC_L2_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define ATC_L2_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define ATC_L2_UE_ERR_STATUS_HI__RESERVED_MASK 0x60000000L +//ATC_L2_CE_ERR_STATUS_LO +#define ATC_L2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define ATC_L2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define ATC_L2_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define ATC_L2_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define ATC_L2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define ATC_L2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define ATC_L2_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define ATC_L2_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//ATC_L2_CE_ERR_STATUS_HI +#define ATC_L2_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define ATC_L2_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define ATC_L2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define ATC_L2_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define ATC_L2_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 
+#define ATC_L2_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define ATC_L2_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define ATC_L2_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define ATC_L2_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define ATC_L2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define ATC_L2_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define ATC_L2_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define ATC_L2_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define ATC_L2_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L // addressBlock: xcd0_gc_utcl2_vml2pfdec @@ -8888,6 +9398,150 @@ #define UTCL2_EDC_CONFIG__DIS_EDC__SHIFT 0x1 #define UTCL2_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L #define UTCL2_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +//VML2_UE_ERR_STATUS_LO +#define VML2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define VML2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define VML2_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define VML2_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define VML2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define VML2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define VML2_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define VML2_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//VML2_WALKER_UE_ERR_STATUS_LO +#define VML2_WALKER_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define VML2_WALKER_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define VML2_WALKER_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define VML2_WALKER_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define VML2_WALKER_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define VML2_WALKER_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define VML2_WALKER_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define VML2_WALKER_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//UTCL2_UE_ERR_STATUS_LO +#define UTCL2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define UTCL2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define UTCL2_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define UTCL2_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define UTCL2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define UTCL2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define UTCL2_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define UTCL2_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//VML2_UE_ERR_STATUS_HI +#define VML2_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define VML2_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define VML2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define VML2_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define VML2_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define VML2_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define VML2_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define VML2_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define VML2_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define VML2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define VML2_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define VML2_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define VML2_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define VML2_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//VML2_WALKER_UE_ERR_STATUS_HI +#define VML2_WALKER_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define VML2_WALKER_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define VML2_WALKER_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define VML2_WALKER_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define VML2_WALKER_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define VML2_WALKER_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define 
VML2_WALKER_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define VML2_WALKER_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define VML2_WALKER_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define VML2_WALKER_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define VML2_WALKER_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define VML2_WALKER_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define VML2_WALKER_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define VML2_WALKER_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//UTCL2_UE_ERR_STATUS_HI +#define UTCL2_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define UTCL2_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define UTCL2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define UTCL2_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define UTCL2_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define UTCL2_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define UTCL2_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define UTCL2_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define UTCL2_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define UTCL2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define UTCL2_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define UTCL2_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define UTCL2_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define UTCL2_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//VML2_CE_ERR_STATUS_LO +#define VML2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define VML2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define VML2_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define VML2_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define VML2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define VML2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define VML2_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define VML2_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//VML2_WALKER_CE_ERR_STATUS_LO +#define VML2_WALKER_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define VML2_WALKER_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define VML2_WALKER_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define VML2_WALKER_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define VML2_WALKER_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define VML2_WALKER_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define VML2_WALKER_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define VML2_WALKER_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//UTCL2_CE_ERR_STATUS_LO +#define UTCL2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define UTCL2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define UTCL2_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define UTCL2_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define UTCL2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define UTCL2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define UTCL2_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define UTCL2_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//VML2_CE_ERR_STATUS_HI +#define VML2_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define VML2_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define VML2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define VML2_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define VML2_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define VML2_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define VML2_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define VML2_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define VML2_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define VML2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define VML2_CE_ERR_STATUS_HI__ERR_INFO_MASK 
0x007FFFF8L +#define VML2_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define VML2_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define VML2_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L +//VML2_WALKER_CE_ERR_STATUS_HI +#define VML2_WALKER_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define VML2_WALKER_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define VML2_WALKER_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define VML2_WALKER_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define VML2_WALKER_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define VML2_WALKER_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define VML2_WALKER_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define VML2_WALKER_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define VML2_WALKER_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define VML2_WALKER_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define VML2_WALKER_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define VML2_WALKER_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define VML2_WALKER_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define VML2_WALKER_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L +//UTCL2_CE_ERR_STATUS_HI +#define UTCL2_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define UTCL2_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define UTCL2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define UTCL2_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define UTCL2_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define UTCL2_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define UTCL2_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define UTCL2_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define UTCL2_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define UTCL2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define UTCL2_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define UTCL2_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define UTCL2_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define UTCL2_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L // addressBlock: xcd0_gc_utcl2_vml2vcdec @@ -11139,6 +11793,98 @@ //TC_CFG_L2_VOLATILE #define TC_CFG_L2_VOLATILE__VOL__SHIFT 0x0 #define TC_CFG_L2_VOLATILE__VOL_MASK 0x0000000FL +//TCP_UE_EDC_HI_REG +#define TCP_UE_EDC_HI_REG__ECC__SHIFT 0x0 +#define TCP_UE_EDC_HI_REG__PARITY__SHIFT 0x1 +#define TCP_UE_EDC_HI_REG__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCP_UE_EDC_HI_REG__ERR_INFO__SHIFT 0x3 +#define TCP_UE_EDC_HI_REG__UE_CNT__SHIFT 0x17 +#define TCP_UE_EDC_HI_REG__FED_CNT__SHIFT 0x1a +#define TCP_UE_EDC_HI_REG__RESERVED__SHIFT 0x1d +#define TCP_UE_EDC_HI_REG__ECC_MASK 0x00000001L +#define TCP_UE_EDC_HI_REG__PARITY_MASK 0x00000002L +#define TCP_UE_EDC_HI_REG__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCP_UE_EDC_HI_REG__ERR_INFO_MASK 0x007FFFF8L +#define TCP_UE_EDC_HI_REG__UE_CNT_MASK 0x03800000L +#define TCP_UE_EDC_HI_REG__FED_CNT_MASK 0x1C000000L +#define TCP_UE_EDC_HI_REG__RESERVED_MASK 0xE0000000L +//TCP_UE_EDC_LO_REG +#define TCP_UE_EDC_LO_REG__STATUS_VALID_FLAG__SHIFT 0x0 +#define TCP_UE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define TCP_UE_EDC_LO_REG__ADDRESS__SHIFT 0x2 +#define TCP_UE_EDC_LO_REG__MEM_ID__SHIFT 0x18 +#define TCP_UE_EDC_LO_REG__STATUS_VALID_FLAG_MASK 0x00000001L +#define TCP_UE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define TCP_UE_EDC_LO_REG__ADDRESS_MASK 0x00FFFFFCL +#define TCP_UE_EDC_LO_REG__MEM_ID_MASK 0xFF000000L +//TCP_CE_EDC_HI_REG +#define TCP_CE_EDC_HI_REG__ECC__SHIFT 0x0 +#define TCP_CE_EDC_HI_REG__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCP_CE_EDC_HI_REG__ERR_INFO__SHIFT 0x3 +#define TCP_CE_EDC_HI_REG__CE_CNT__SHIFT 0x17 +#define TCP_CE_EDC_HI_REG__POISON__SHIFT 0x1a 
+#define TCP_CE_EDC_HI_REG__RESERVED__SHIFT 0x1b +#define TCP_CE_EDC_HI_REG__ECC_MASK 0x00000001L +#define TCP_CE_EDC_HI_REG__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCP_CE_EDC_HI_REG__ERR_INFO_MASK 0x007FFFF8L +#define TCP_CE_EDC_HI_REG__CE_CNT_MASK 0x03800000L +#define TCP_CE_EDC_HI_REG__POISON_MASK 0x04000000L +#define TCP_CE_EDC_HI_REG__RESERVED_MASK 0xF8000000L +//TCP_CE_EDC_LO_REG +#define TCP_CE_EDC_LO_REG__STATUS_VALID_FLAG__SHIFT 0x0 +#define TCP_CE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define TCP_CE_EDC_LO_REG__ADDRESS__SHIFT 0x2 +#define TCP_CE_EDC_LO_REG__MEM_ID__SHIFT 0x18 +#define TCP_CE_EDC_LO_REG__STATUS_VALID_FLAG_MASK 0x00000001L +#define TCP_CE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define TCP_CE_EDC_LO_REG__ADDRESS_MASK 0x00FFFFFCL +#define TCP_CE_EDC_LO_REG__MEM_ID_MASK 0xFF000000L +//TCI_UE_EDC_HI_REG +#define TCI_UE_EDC_HI_REG__ECC__SHIFT 0x0 +#define TCI_UE_EDC_HI_REG__PARITY__SHIFT 0x1 +#define TCI_UE_EDC_HI_REG__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCI_UE_EDC_HI_REG__ERR_INFO__SHIFT 0x3 +#define TCI_UE_EDC_HI_REG__UE_CNT__SHIFT 0x17 +#define TCI_UE_EDC_HI_REG__FED_CNT__SHIFT 0x1a +#define TCI_UE_EDC_HI_REG__RESERVED__SHIFT 0x1d +#define TCI_UE_EDC_HI_REG__ECC_MASK 0x00000001L +#define TCI_UE_EDC_HI_REG__PARITY_MASK 0x00000002L +#define TCI_UE_EDC_HI_REG__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCI_UE_EDC_HI_REG__ERR_INFO_MASK 0x007FFFF8L +#define TCI_UE_EDC_HI_REG__UE_CNT_MASK 0x03800000L +#define TCI_UE_EDC_HI_REG__FED_CNT_MASK 0x1C000000L +#define TCI_UE_EDC_HI_REG__RESERVED_MASK 0xE0000000L +//TCI_UE_EDC_LO_REG +#define TCI_UE_EDC_LO_REG__STATUS_VALID_FLAG__SHIFT 0x0 +#define TCI_UE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define TCI_UE_EDC_LO_REG__ADDRESS__SHIFT 0x2 +#define TCI_UE_EDC_LO_REG__MEM_ID__SHIFT 0x18 +#define TCI_UE_EDC_LO_REG__STATUS_VALID_FLAG_MASK 0x00000001L +#define TCI_UE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define TCI_UE_EDC_LO_REG__ADDRESS_MASK 0x00FFFFFCL +#define TCI_UE_EDC_LO_REG__MEM_ID_MASK 0xFF000000L +//TCI_CE_EDC_HI_REG +#define TCI_CE_EDC_HI_REG__ECC__SHIFT 0x0 +#define TCI_CE_EDC_HI_REG__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCI_CE_EDC_HI_REG__ERR_INFO__SHIFT 0x3 +#define TCI_CE_EDC_HI_REG__CE_CNT__SHIFT 0x17 +#define TCI_CE_EDC_HI_REG__POISON__SHIFT 0x1a +#define TCI_CE_EDC_HI_REG__RESERVED__SHIFT 0x1b +#define TCI_CE_EDC_HI_REG__ECC_MASK 0x00000001L +#define TCI_CE_EDC_HI_REG__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCI_CE_EDC_HI_REG__ERR_INFO_MASK 0x007FFFF8L +#define TCI_CE_EDC_HI_REG__CE_CNT_MASK 0x03800000L +#define TCI_CE_EDC_HI_REG__POISON_MASK 0x04000000L +#define TCI_CE_EDC_HI_REG__RESERVED_MASK 0xF8000000L +//TCI_CE_EDC_LO_REG +#define TCI_CE_EDC_LO_REG__STATUS_VALID_FLAG__SHIFT 0x0 +#define TCI_CE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1 +#define TCI_CE_EDC_LO_REG__ADDRESS__SHIFT 0x2 +#define TCI_CE_EDC_LO_REG__MEM_ID__SHIFT 0x18 +#define TCI_CE_EDC_LO_REG__STATUS_VALID_FLAG_MASK 0x00000001L +#define TCI_CE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L +#define TCI_CE_EDC_LO_REG__ADDRESS_MASK 0x00FFFFFCL +#define TCI_CE_EDC_LO_REG__MEM_ID_MASK 0xFF000000L //TCI_MISC #define TCI_MISC__FGCG_REPEATER_DISABLE__SHIFT 0x0 #define TCI_MISC__LEGACY_MGCG_DISABLE__SHIFT 0x1 @@ -11560,6 +12306,112 @@ #define TCX_DSM_CNTL2__SED_ENABLE_ERROR_INJECT_MASK 0x00000003L #define TCX_DSM_CNTL2__SED_SELECT_INJECT_DELAY_MASK 0x00000004L #define TCX_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L +//TCA_UE_ERR_STATUS_LO +#define 
TCA_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define TCA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define TCA_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define TCA_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define TCA_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define TCA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define TCA_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define TCA_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//TCA_UE_ERR_STATUS_HI +#define TCA_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define TCA_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define TCA_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCA_UE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3 +#define TCA_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define TCA_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define TCA_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define TCA_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define TCA_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCA_UE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L +#define TCA_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define TCA_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +//TCX_UE_ERR_STATUS_LO +#define TCX_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define TCX_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define TCX_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define TCX_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define TCX_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define TCX_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define TCX_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define TCX_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//TCX_UE_ERR_STATUS_HI +#define TCX_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define TCX_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define TCX_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCX_UE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3 +#define TCX_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define TCX_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define TCX_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define TCX_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define TCX_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCX_UE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L +#define TCX_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define TCX_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +//TCX_CE_ERR_STATUS_LO +#define TCX_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define TCX_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define TCX_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define TCX_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define TCX_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define TCX_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define TCX_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define TCX_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//TCX_CE_ERR_STATUS_HI +#define TCX_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define TCX_CE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCX_CE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3 +#define TCX_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define TCX_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define TCX_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define TCX_CE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCX_CE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L +#define TCX_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define TCX_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +//TCC_UE_ERR_STATUS_LO +#define TCC_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define 
TCC_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define TCC_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define TCC_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define TCC_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define TCC_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define TCC_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define TCC_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//TCC_UE_ERR_STATUS_HI +#define TCC_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define TCC_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define TCC_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCC_UE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3 +#define TCC_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define TCC_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define TCC_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define TCC_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define TCC_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCC_UE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L +#define TCC_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define TCC_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +//TCC_CE_ERR_STATUS_LO +#define TCC_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define TCC_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define TCC_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define TCC_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define TCC_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define TCC_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define TCC_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define TCC_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//TCC_CE_ERR_STATUS_HI +#define TCC_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define TCC_CE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2 +#define TCC_CE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3 +#define TCC_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define TCC_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define TCC_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define TCC_CE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L +#define TCC_CE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L +#define TCC_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define TCC_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L // addressBlock: xcd0_gc_shdec @@ -14384,6 +15236,150 @@ #define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10 #define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL #define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L +//CPC_UE_ERR_STATUS_LO +#define CPC_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define CPC_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define CPC_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define CPC_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define CPC_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define CPC_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define CPC_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define CPC_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//CPC_UE_ERR_STATUS_HI +#define CPC_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define CPC_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define CPC_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define CPC_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define CPC_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define CPC_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define CPC_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define CPC_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define CPC_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define CPC_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define CPC_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define 
CPC_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define CPC_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define CPC_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//CPC_CE_ERR_STATUS_LO +#define CPC_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define CPC_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define CPC_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define CPC_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define CPC_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define CPC_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define CPC_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define CPC_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//CPC_CE_ERR_STATUS_HI +#define CPC_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define CPC_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define CPC_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define CPC_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define CPC_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define CPC_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define CPC_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define CPC_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define CPC_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define CPC_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define CPC_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define CPC_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define CPC_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define CPC_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L +//CPF_UE_ERR_STATUS_LO +#define CPF_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define CPF_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define CPF_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define CPF_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define CPF_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define CPF_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define CPF_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define CPF_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//CPF_UE_ERR_STATUS_HI +#define CPF_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define CPF_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define CPF_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define CPF_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define CPF_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define CPF_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define CPF_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define CPF_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define CPF_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define CPF_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define CPF_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define CPF_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define CPF_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define CPF_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//CPF_CE_ERR_STATUS_LO +#define CPF_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define CPF_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define CPF_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define CPF_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define CPF_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define CPF_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define CPF_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define CPF_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//CPF_CE_ERR_STATUS_HI +#define CPF_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define CPF_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define CPF_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define CPF_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define CPF_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define 
CPF_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define CPF_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define CPF_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define CPF_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define CPF_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define CPF_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define CPF_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define CPF_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define CPF_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L +//CPG_UE_ERR_STATUS_LO +#define CPG_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define CPG_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define CPG_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define CPG_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define CPG_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define CPG_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define CPG_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define CPG_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//CPG_UE_ERR_STATUS_HI +#define CPG_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define CPG_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define CPG_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define CPG_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define CPG_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define CPG_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define CPG_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d +#define CPG_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define CPG_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define CPG_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define CPG_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define CPG_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define CPG_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +#define CPG_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L +//CPG_CE_ERR_STATUS_LO +#define CPG_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define CPG_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define CPG_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define CPG_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define CPG_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define CPG_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define CPG_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define CPG_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//CPG_CE_ERR_STATUS_HI +#define CPG_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define CPG_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1 +#define CPG_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define CPG_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define CPG_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define CPG_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define CPG_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b +#define CPG_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define CPG_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L +#define CPG_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define CPG_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define CPG_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define CPG_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L +#define CPG_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L // addressBlock: xcd0_gc_cppdec2 @@ -22764,6 +23760,74 @@ #define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN_MASK 0x00000030L #define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN_MASK 0x000000C0L +// addressBlock: xcd0_gc_gccanedec +//GC_CANE_ERR_STATUS +#define GC_CANE_ERR_STATUS__SDPM_RDRSP_STATUS__SHIFT 0x0 +#define GC_CANE_ERR_STATUS__SDPM_WRRSP_STATUS__SHIFT 0x4 +#define GC_CANE_ERR_STATUS__SDPM_RDRSP_DATASTATUS__SHIFT 0x8 +#define GC_CANE_ERR_STATUS__SDPM_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define 
GC_CANE_ERR_STATUS__SDPS_DAT_ERROR__SHIFT 0xb +#define GC_CANE_ERR_STATUS__SDPS_DAT_PARITY_ERROR__SHIFT 0xc +#define GC_CANE_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xd +#define GC_CANE_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xe +#define GC_CANE_ERR_STATUS__BUSY_ON_UER_ERROR__SHIFT 0xf +#define GC_CANE_ERR_STATUS__FUE_FLAG__SHIFT 0x10 +#define GC_CANE_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0x11 +#define GC_CANE_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x12 +#define GC_CANE_ERR_STATUS__SDPM_RDRSP_STATUS_MASK 0x0000000FL +#define GC_CANE_ERR_STATUS__SDPM_WRRSP_STATUS_MASK 0x000000F0L +#define GC_CANE_ERR_STATUS__SDPM_RDRSP_DATASTATUS_MASK 0x00000300L +#define GC_CANE_ERR_STATUS__SDPM_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define GC_CANE_ERR_STATUS__SDPS_DAT_ERROR_MASK 0x00000800L +#define GC_CANE_ERR_STATUS__SDPS_DAT_PARITY_ERROR_MASK 0x00001000L +#define GC_CANE_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00002000L +#define GC_CANE_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00004000L +#define GC_CANE_ERR_STATUS__BUSY_ON_UER_ERROR_MASK 0x00008000L +#define GC_CANE_ERR_STATUS__FUE_FLAG_MASK 0x00010000L +#define GC_CANE_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00020000L +#define GC_CANE_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00040000L +//GC_CANE_UE_ERR_STATUS_LO +#define GC_CANE_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define GC_CANE_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define GC_CANE_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define GC_CANE_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define GC_CANE_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define GC_CANE_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define GC_CANE_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define GC_CANE_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//GC_CANE_UE_ERR_STATUS_HI +#define GC_CANE_UE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define GC_CANE_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1 +#define GC_CANE_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define GC_CANE_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define GC_CANE_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17 +#define GC_CANE_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a +#define GC_CANE_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define GC_CANE_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L +#define GC_CANE_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define GC_CANE_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define GC_CANE_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L +#define GC_CANE_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L +//GC_CANE_CE_ERR_STATUS_LO +#define GC_CANE_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define GC_CANE_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define GC_CANE_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2 +#define GC_CANE_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18 +#define GC_CANE_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define GC_CANE_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define GC_CANE_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL +#define GC_CANE_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L +//GC_CANE_CE_ERR_STATUS_HI +#define GC_CANE_CE_ERR_STATUS_HI__ECC__SHIFT 0x0 +#define GC_CANE_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define GC_CANE_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3 +#define GC_CANE_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17 +#define GC_CANE_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a +#define GC_CANE_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L +#define GC_CANE_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define GC_CANE_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L +#define 
GC_CANE_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L +#define GC_CANE_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L // addressBlock: xcd0_gc_perfddec //CPG_PERFCOUNTER1_LO @@ -26471,6 +27535,30 @@ //RLC_CPG_STAT_INVAL #define RLC_CPG_STAT_INVAL__CPG_stat_inval__SHIFT 0x0 #define RLC_CPG_STAT_INVAL__CPG_stat_inval_MASK 0x00000001L +//RLC_UE_ERR_STATUS_LOW +#define RLC_UE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define RLC_UE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define RLC_UE_ERR_STATUS_LOW__ADDRESS__SHIFT 0x2 +#define RLC_UE_ERR_STATUS_LOW__MEMORY_ID__SHIFT 0x18 +#define RLC_UE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define RLC_UE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define RLC_UE_ERR_STATUS_LOW__ADDRESS_MASK 0x00FFFFFCL +#define RLC_UE_ERR_STATUS_LOW__MEMORY_ID_MASK 0xFF000000L +//RLC_UE_ERR_STATUS_HIGH +#define RLC_UE_ERR_STATUS_HIGH__ECC__SHIFT 0x0 +#define RLC_UE_ERR_STATUS_HIGH__PARITY__SHIFT 0x1 +#define RLC_UE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define RLC_UE_ERR_STATUS_HIGH__ERR_INFO__SHIFT 0x3 +#define RLC_UE_ERR_STATUS_HIGH__UE_CNT__SHIFT 0x17 +#define RLC_UE_ERR_STATUS_HIGH__FED_CNT__SHIFT 0x1a +#define RLC_UE_ERR_STATUS_HIGH__RESERVED__SHIFT 0x1d +#define RLC_UE_ERR_STATUS_HIGH__ECC_MASK 0x00000001L +#define RLC_UE_ERR_STATUS_HIGH__PARITY_MASK 0x00000002L +#define RLC_UE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define RLC_UE_ERR_STATUS_HIGH__ERR_INFO_MASK 0x007FFFF8L +#define RLC_UE_ERR_STATUS_HIGH__UE_CNT_MASK 0x03800000L +#define RLC_UE_ERR_STATUS_HIGH__FED_CNT_MASK 0x1C000000L +#define RLC_UE_ERR_STATUS_HIGH__RESERVED_MASK 0xE0000000L //RLC_DSM_CNTL #define RLC_DSM_CNTL__RLCG_INSTR_RAM_IRRITATOR_DATA_SEL__SHIFT 0x0 #define RLC_DSM_CNTL__RLCG_INSTR_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x2 @@ -26573,6 +27661,30 @@ #define RLC_DSM_CNTL2A__RLC_SPM_SE2_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L #define RLC_DSM_CNTL2A__RLC_SPM_SE3_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L #define RLC_DSM_CNTL2A__RLC_SPM_SE3_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L +//RLC_CE_ERR_STATUS_LOW +#define RLC_CE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG__SHIFT 0x0 +#define RLC_CE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG__SHIFT 0x1 +#define RLC_CE_ERR_STATUS_LOW__ADDRESS__SHIFT 0x2 +#define RLC_CE_ERR_STATUS_LOW__MEMORY_ID__SHIFT 0x18 +#define RLC_CE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG_MASK 0x00000001L +#define RLC_CE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG_MASK 0x00000002L +#define RLC_CE_ERR_STATUS_LOW__ADDRESS_MASK 0x00FFFFFCL +#define RLC_CE_ERR_STATUS_LOW__MEMORY_ID_MASK 0xFF000000L +//RLC_CE_ERR_STATUS_HIGH +#define RLC_CE_ERR_STATUS_HIGH__ECC__SHIFT 0x0 +#define RLC_CE_ERR_STATUS_HIGH__OTHER__SHIFT 0x1 +#define RLC_CE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG__SHIFT 0x2 +#define RLC_CE_ERR_STATUS_HIGH__ERR_INFO__SHIFT 0x3 +#define RLC_CE_ERR_STATUS_HIGH__CE_CNT__SHIFT 0x17 +#define RLC_CE_ERR_STATUS_HIGH__POISON__SHIFT 0x1a +#define RLC_CE_ERR_STATUS_HIGH__RESERVED__SHIFT 0x1b +#define RLC_CE_ERR_STATUS_HIGH__ECC_MASK 0x00000001L +#define RLC_CE_ERR_STATUS_HIGH__OTHER_MASK 0x00000002L +#define RLC_CE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG_MASK 0x00000004L +#define RLC_CE_ERR_STATUS_HIGH__ERR_INFO_MASK 0x007FFFF8L +#define RLC_CE_ERR_STATUS_HIGH__CE_CNT_MASK 0x03800000L +#define RLC_CE_ERR_STATUS_HIGH__POISON_MASK 0x04000000L +#define RLC_CE_ERR_STATUS_HIGH__RESERVED_MASK 0xF8000000L //RLC_RLCV_SPARE_INT #define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0 #define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1 From 
77462ab8c62b0dc65261c042771efea44a111131 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 7 Feb 2023 18:30:55 +0800 Subject: [PATCH 568/861] drm/amdgpu: add RAS definitions for GFX Add common GFX RAS definitions. v2: remove instance from amdgpu_gfx_ras_reg_entry, amdgpu_ras_err_status_reg_entry has already defined it. v3: remove memory id definitions from amdgpu_gfx.h, they are related to IP version. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 39 +++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 0df53fe7b199..ce0f7a8ad4b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -73,6 +73,32 @@ enum amdgpu_pkg_type { AMDGPU_PKG_TYPE_UNKNOWN, }; +enum amdgpu_gfx_ras_mem_id_type { + AMDGPU_GFX_CP_MEM = 0, + AMDGPU_GFX_GCEA_MEM, + AMDGPU_GFX_GC_CANE_MEM, + AMDGPU_GFX_GCUTCL2_MEM, + AMDGPU_GFX_GDS_MEM, + AMDGPU_GFX_LDS_MEM, + AMDGPU_GFX_RLC_MEM, + AMDGPU_GFX_SP_MEM, + AMDGPU_GFX_SPI_MEM, + AMDGPU_GFX_SQC_MEM, + AMDGPU_GFX_SQ_MEM, + AMDGPU_GFX_TA_MEM, + AMDGPU_GFX_TCC_MEM, + AMDGPU_GFX_TCA_MEM, + AMDGPU_GFX_TCI_MEM, + AMDGPU_GFX_TCP_MEM, + AMDGPU_GFX_TD_MEM, + AMDGPU_GFX_TCX_MEM, + AMDGPU_GFX_ATC_L2_MEM, + AMDGPU_GFX_UTCL2_MEM, + AMDGPU_GFX_VML2_MEM, + AMDGPU_GFX_VML2_WALKER_MEM, + AMDGPU_GFX_MEM_TYPE_NUM +}; + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -410,6 +436,19 @@ struct amdgpu_gfx { struct mutex partition_mutex; }; +struct amdgpu_gfx_ras_reg_entry { + struct amdgpu_ras_err_status_reg_entry reg_entry; + enum amdgpu_gfx_ras_mem_id_type mem_id_type; + uint32_t se_num; +}; + +struct amdgpu_gfx_ras_mem_id_entry { + const struct amdgpu_ras_memory_id_entry *mem_id_ent; + uint32_t size; +}; + +#define AMDGPU_GFX_MEMID_ENT(x) {(x), ARRAY_SIZE(x)}, + #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id))) From 5c1c09a71634423604c47502d8059a5c098c6f40 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 6 Feb 2023 11:38:19 +0800 Subject: [PATCH 569/861] drm/amdgpu: add RAS error count definitions for gfx_v9_4_3 Prepare for the query of GFX RAS ce/ue count. v2: remove xcp operation. only select_se_sh when instance number is more than 1. v3: add more CE/UE registers to query list. add check for se_num before select_se_sh. change instance from 0 to xcc_id for register access. v4: move gfx memory id definitions to gfx_v9_4_3. v5: create a dedicated patch for adding error count query function.
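For illustration (an editor's sketch, not code from this patch): the LO/HI error-status pairs listed below are decoded with the SHIFT/MASK pairs from the register headers above. A minimal example for the RLC instance follows; the helper name, the bare register snapshots passed in, and the guard on ERR_INFO_VALID are all assumptions, and the real query function lands in a later change:

  /* Illustrative sketch: decode one UE LO/HI status pair. */
  static void sketch_decode_rlc_ue_status(uint32_t lo, uint32_t hi,
  					  unsigned long *ue_count)
  {
  	/* LO word: decode ADDRESS/MEMORY_ID only when flagged valid. */
  	if ((lo & RLC_UE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG_MASK) &&
  	    (lo & RLC_UE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG_MASK)) {
  		uint32_t mem_id = (lo & RLC_UE_ERR_STATUS_LOW__MEMORY_ID_MASK) >>
  				  RLC_UE_ERR_STATUS_LOW__MEMORY_ID__SHIFT;

  		/* mem_id indexes a name table such as
  		 * gfx_v9_4_3_ras_rlc_mem_list below.
  		 */
  		(void)mem_id;
  	}

  	/* HI word: UE_CNT is the 3-bit counter at bit 23; FED_CNT sits
  	 * at bit 26 and can be read the same way.
  	 */
  	if (hi & RLC_UE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG_MASK)
  		*ue_count += (hi & RLC_UE_ERR_STATUS_HIGH__UE_CNT_MASK) >>
  			     RLC_UE_ERR_STATUS_HIGH__UE_CNT__SHIFT;
  }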
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 740 ++++++++++++++++++++++++ 1 file changed, 740 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index e6069d081f71..188b4d9a2cbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2980,6 +2980,746 @@ static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable) } } +enum amdgpu_gfx_cp_ras_mem_id { + AMDGPU_GFX_CP_MEM1 = 1, + AMDGPU_GFX_CP_MEM2, + AMDGPU_GFX_CP_MEM3, + AMDGPU_GFX_CP_MEM4, + AMDGPU_GFX_CP_MEM5, +}; + +enum amdgpu_gfx_gcea_ras_mem_id { + AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4, + AMDGPU_GFX_GCEA_IORD_CMDMEM, + AMDGPU_GFX_GCEA_GMIWR_CMDMEM, + AMDGPU_GFX_GCEA_GMIRD_CMDMEM, + AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, + AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, + AMDGPU_GFX_GCEA_MAM_DMEM0, + AMDGPU_GFX_GCEA_MAM_DMEM1, + AMDGPU_GFX_GCEA_MAM_DMEM2, + AMDGPU_GFX_GCEA_MAM_DMEM3, + AMDGPU_GFX_GCEA_MAM_AMEM0, + AMDGPU_GFX_GCEA_MAM_AMEM1, + AMDGPU_GFX_GCEA_MAM_AMEM2, + AMDGPU_GFX_GCEA_MAM_AMEM3, + AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, + AMDGPU_GFX_GCEA_WRET_TAGMEM, + AMDGPU_GFX_GCEA_RRET_TAGMEM, + AMDGPU_GFX_GCEA_IOWR_DATAMEM, + AMDGPU_GFX_GCEA_GMIWR_DATAMEM, + AMDGPU_GFX_GCEA_DRAM_DATAMEM, +}; + +enum amdgpu_gfx_gc_cane_ras_mem_id { + AMDGPU_GFX_GC_CANE_MEM0 = 0, +}; + +enum amdgpu_gfx_gcutcl2_ras_mem_id { + AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160, +}; + +enum amdgpu_gfx_gds_ras_mem_id { + AMDGPU_GFX_GDS_MEM0 = 0, +}; + +enum amdgpu_gfx_lds_ras_mem_id { + AMDGPU_GFX_LDS_BANK0 = 0, + AMDGPU_GFX_LDS_BANK1, + AMDGPU_GFX_LDS_BANK2, + AMDGPU_GFX_LDS_BANK3, + AMDGPU_GFX_LDS_BANK4, + AMDGPU_GFX_LDS_BANK5, + AMDGPU_GFX_LDS_BANK6, + AMDGPU_GFX_LDS_BANK7, + AMDGPU_GFX_LDS_BANK8, + AMDGPU_GFX_LDS_BANK9, + AMDGPU_GFX_LDS_BANK10, + AMDGPU_GFX_LDS_BANK11, + AMDGPU_GFX_LDS_BANK12, + AMDGPU_GFX_LDS_BANK13, + AMDGPU_GFX_LDS_BANK14, + AMDGPU_GFX_LDS_BANK15, + AMDGPU_GFX_LDS_BANK16, + AMDGPU_GFX_LDS_BANK17, + AMDGPU_GFX_LDS_BANK18, + AMDGPU_GFX_LDS_BANK19, + AMDGPU_GFX_LDS_BANK20, + AMDGPU_GFX_LDS_BANK21, + AMDGPU_GFX_LDS_BANK22, + AMDGPU_GFX_LDS_BANK23, + AMDGPU_GFX_LDS_BANK24, + AMDGPU_GFX_LDS_BANK25, + AMDGPU_GFX_LDS_BANK26, + AMDGPU_GFX_LDS_BANK27, + AMDGPU_GFX_LDS_BANK28, + AMDGPU_GFX_LDS_BANK29, + AMDGPU_GFX_LDS_BANK30, + AMDGPU_GFX_LDS_BANK31, + AMDGPU_GFX_LDS_SP_BUFFER_A, + AMDGPU_GFX_LDS_SP_BUFFER_B, +}; + +enum amdgpu_gfx_rlc_ras_mem_id { + AMDGPU_GFX_RLC_GPMF32 = 1, + AMDGPU_GFX_RLC_RLCVF32, + AMDGPU_GFX_RLC_SCRATCH, + AMDGPU_GFX_RLC_SRM_ARAM, + AMDGPU_GFX_RLC_SRM_DRAM, + AMDGPU_GFX_RLC_TCTAG, + AMDGPU_GFX_RLC_SPM_SE, + AMDGPU_GFX_RLC_SPM_GRBMT, +}; + +enum amdgpu_gfx_sp_ras_mem_id { + AMDGPU_GFX_SP_SIMDID0 = 0, +}; + +enum amdgpu_gfx_spi_ras_mem_id { + AMDGPU_GFX_SPI_MEM0 = 0, + AMDGPU_GFX_SPI_MEM1, + AMDGPU_GFX_SPI_MEM2, + AMDGPU_GFX_SPI_MEM3, +}; + +enum amdgpu_gfx_sqc_ras_mem_id { + AMDGPU_GFX_SQC_INST_CACHE_A = 100, + AMDGPU_GFX_SQC_INST_CACHE_B = 101, + AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102, + AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103, + AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104, + AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105, + AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106, + AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107, + AMDGPU_GFX_SQC_DATA_CACHE_A = 200, + AMDGPU_GFX_SQC_DATA_CACHE_B = 201, + AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202, + AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203, + AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204, + AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 
205, + AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206, + AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207, + AMDGPU_GFX_SQC_DIRTY_BIT_A = 208, + AMDGPU_GFX_SQC_DIRTY_BIT_B = 209, + AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210, + AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211, + AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212, + AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213, + AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108, +}; + +enum amdgpu_gfx_sq_ras_mem_id { + AMDGPU_GFX_SQ_SGPR_MEM0 = 0, + AMDGPU_GFX_SQ_SGPR_MEM1, + AMDGPU_GFX_SQ_SGPR_MEM2, + AMDGPU_GFX_SQ_SGPR_MEM3, +}; + +enum amdgpu_gfx_ta_ras_mem_id { + AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1, + AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, + AMDGPU_GFX_TA_FS_CFIFO_RAM, + AMDGPU_GFX_TA_FSX_LFIFO, + AMDGPU_GFX_TA_FS_DFIFO_RAM, +}; + +enum amdgpu_gfx_tcc_ras_mem_id { + AMDGPU_GFX_TCC_MEM1 = 1, +}; + +enum amdgpu_gfx_tca_ras_mem_id { + AMDGPU_GFX_TCA_MEM1 = 1, +}; + +enum amdgpu_gfx_tci_ras_mem_id { + AMDGPU_GFX_TCIW_MEM = 1, +}; + +enum amdgpu_gfx_tcp_ras_mem_id { + AMDGPU_GFX_TCP_LFIFO0 = 1, + AMDGPU_GFX_TCP_SET0BANK0_RAM, + AMDGPU_GFX_TCP_SET0BANK1_RAM, + AMDGPU_GFX_TCP_SET0BANK2_RAM, + AMDGPU_GFX_TCP_SET0BANK3_RAM, + AMDGPU_GFX_TCP_SET1BANK0_RAM, + AMDGPU_GFX_TCP_SET1BANK1_RAM, + AMDGPU_GFX_TCP_SET1BANK2_RAM, + AMDGPU_GFX_TCP_SET1BANK3_RAM, + AMDGPU_GFX_TCP_SET2BANK0_RAM, + AMDGPU_GFX_TCP_SET2BANK1_RAM, + AMDGPU_GFX_TCP_SET2BANK2_RAM, + AMDGPU_GFX_TCP_SET2BANK3_RAM, + AMDGPU_GFX_TCP_SET3BANK0_RAM, + AMDGPU_GFX_TCP_SET3BANK1_RAM, + AMDGPU_GFX_TCP_SET3BANK2_RAM, + AMDGPU_GFX_TCP_SET3BANK3_RAM, + AMDGPU_GFX_TCP_VM_FIFO, + AMDGPU_GFX_TCP_DB_TAGRAM0, + AMDGPU_GFX_TCP_DB_TAGRAM1, + AMDGPU_GFX_TCP_DB_TAGRAM2, + AMDGPU_GFX_TCP_DB_TAGRAM3, + AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, + AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, + AMDGPU_GFX_TCP_CMD_FIFO, +}; + +enum amdgpu_gfx_td_ras_mem_id { + AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1, + AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, + AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, +}; + +enum amdgpu_gfx_tcx_ras_mem_id { + AMDGPU_GFX_TCX_FIFOD0 = 0, + AMDGPU_GFX_TCX_FIFOD1, + AMDGPU_GFX_TCX_FIFOD2, + AMDGPU_GFX_TCX_FIFOD3, + AMDGPU_GFX_TCX_FIFOD4, + AMDGPU_GFX_TCX_FIFOD5, + AMDGPU_GFX_TCX_FIFOD6, + AMDGPU_GFX_TCX_FIFOD7, + AMDGPU_GFX_TCX_FIFOB0, + AMDGPU_GFX_TCX_FIFOB1, + AMDGPU_GFX_TCX_FIFOB2, + AMDGPU_GFX_TCX_FIFOB3, + AMDGPU_GFX_TCX_FIFOB4, + AMDGPU_GFX_TCX_FIFOB5, + AMDGPU_GFX_TCX_FIFOB6, + AMDGPU_GFX_TCX_FIFOB7, + AMDGPU_GFX_TCX_FIFOA0, + AMDGPU_GFX_TCX_FIFOA1, + AMDGPU_GFX_TCX_FIFOA2, + AMDGPU_GFX_TCX_FIFOA3, + AMDGPU_GFX_TCX_FIFOA4, + AMDGPU_GFX_TCX_FIFOA5, + AMDGPU_GFX_TCX_FIFOA6, + AMDGPU_GFX_TCX_FIFOA7, + AMDGPU_GFX_TCX_CFIFO0, + AMDGPU_GFX_TCX_CFIFO1, + AMDGPU_GFX_TCX_CFIFO2, + AMDGPU_GFX_TCX_CFIFO3, + AMDGPU_GFX_TCX_CFIFO4, + AMDGPU_GFX_TCX_CFIFO5, + AMDGPU_GFX_TCX_CFIFO6, + AMDGPU_GFX_TCX_CFIFO7, + AMDGPU_GFX_TCX_FIFO_ACKB0, + AMDGPU_GFX_TCX_FIFO_ACKB1, + AMDGPU_GFX_TCX_FIFO_ACKB2, + AMDGPU_GFX_TCX_FIFO_ACKB3, + AMDGPU_GFX_TCX_FIFO_ACKB4, + AMDGPU_GFX_TCX_FIFO_ACKB5, + AMDGPU_GFX_TCX_FIFO_ACKB6, + AMDGPU_GFX_TCX_FIFO_ACKB7, + AMDGPU_GFX_TCX_FIFO_ACKD0, + AMDGPU_GFX_TCX_FIFO_ACKD1, + AMDGPU_GFX_TCX_FIFO_ACKD2, + AMDGPU_GFX_TCX_FIFO_ACKD3, + AMDGPU_GFX_TCX_FIFO_ACKD4, + AMDGPU_GFX_TCX_FIFO_ACKD5, + AMDGPU_GFX_TCX_FIFO_ACKD6, + AMDGPU_GFX_TCX_FIFO_ACKD7, + AMDGPU_GFX_TCX_DST_FIFOA0, + AMDGPU_GFX_TCX_DST_FIFOA1, + AMDGPU_GFX_TCX_DST_FIFOA2, + AMDGPU_GFX_TCX_DST_FIFOA3, + AMDGPU_GFX_TCX_DST_FIFOA4, + AMDGPU_GFX_TCX_DST_FIFOA5, + AMDGPU_GFX_TCX_DST_FIFOA6, + AMDGPU_GFX_TCX_DST_FIFOA7, + AMDGPU_GFX_TCX_DST_FIFOB0, + AMDGPU_GFX_TCX_DST_FIFOB1, + 
AMDGPU_GFX_TCX_DST_FIFOB2, + AMDGPU_GFX_TCX_DST_FIFOB3, + AMDGPU_GFX_TCX_DST_FIFOB4, + AMDGPU_GFX_TCX_DST_FIFOB5, + AMDGPU_GFX_TCX_DST_FIFOB6, + AMDGPU_GFX_TCX_DST_FIFOB7, + AMDGPU_GFX_TCX_DST_FIFOD0, + AMDGPU_GFX_TCX_DST_FIFOD1, + AMDGPU_GFX_TCX_DST_FIFOD2, + AMDGPU_GFX_TCX_DST_FIFOD3, + AMDGPU_GFX_TCX_DST_FIFOD4, + AMDGPU_GFX_TCX_DST_FIFOD5, + AMDGPU_GFX_TCX_DST_FIFOD6, + AMDGPU_GFX_TCX_DST_FIFOD7, + AMDGPU_GFX_TCX_DST_FIFO_ACKB0, + AMDGPU_GFX_TCX_DST_FIFO_ACKB1, + AMDGPU_GFX_TCX_DST_FIFO_ACKB2, + AMDGPU_GFX_TCX_DST_FIFO_ACKB3, + AMDGPU_GFX_TCX_DST_FIFO_ACKB4, + AMDGPU_GFX_TCX_DST_FIFO_ACKB5, + AMDGPU_GFX_TCX_DST_FIFO_ACKB6, + AMDGPU_GFX_TCX_DST_FIFO_ACKB7, + AMDGPU_GFX_TCX_DST_FIFO_ACKD0, + AMDGPU_GFX_TCX_DST_FIFO_ACKD1, + AMDGPU_GFX_TCX_DST_FIFO_ACKD2, + AMDGPU_GFX_TCX_DST_FIFO_ACKD3, + AMDGPU_GFX_TCX_DST_FIFO_ACKD4, + AMDGPU_GFX_TCX_DST_FIFO_ACKD5, + AMDGPU_GFX_TCX_DST_FIFO_ACKD6, + AMDGPU_GFX_TCX_DST_FIFO_ACKD7, +}; + +enum amdgpu_gfx_atc_l2_ras_mem_id { + AMDGPU_GFX_ATC_L2_MEM0 = 0, +}; + +enum amdgpu_gfx_utcl2_ras_mem_id { + AMDGPU_GFX_UTCL2_MEM0 = 0, +}; + +enum amdgpu_gfx_vml2_ras_mem_id { + AMDGPU_GFX_VML2_MEM0 = 0, +}; + +enum amdgpu_gfx_vml2_walker_ras_mem_id { + AMDGPU_GFX_VML2_WALKER_MEM0 = 0, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = { + {AMDGPU_GFX_CP_MEM1, "CP_MEM1"}, + {AMDGPU_GFX_CP_MEM2, "CP_MEM2"}, + {AMDGPU_GFX_CP_MEM3, "CP_MEM3"}, + {AMDGPU_GFX_CP_MEM4, "CP_MEM4"}, + {AMDGPU_GFX_CP_MEM5, "CP_MEM5"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = { + {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"}, + {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"}, + {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"}, + {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"}, + {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"}, + {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"}, + {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"}, + {AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"}, + {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"}, + {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"}, + {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"}, + {AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"}, + {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"}, + {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"}, + {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"}, + {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"}, + {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"}, + {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"}, + {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"}, + {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = { + {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = { + {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = { + {AMDGPU_GFX_GDS_MEM0, "GDS_MEM"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = { + {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"}, + {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"}, + {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"}, + {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"}, + {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"}, + {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"}, + {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"}, + {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"}, + {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"}, + {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"}, + {AMDGPU_GFX_LDS_BANK10, 
"LDS_BANK10"}, + {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"}, + {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"}, + {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"}, + {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"}, + {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"}, + {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"}, + {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"}, + {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"}, + {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"}, + {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"}, + {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"}, + {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"}, + {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"}, + {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"}, + {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"}, + {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"}, + {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"}, + {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"}, + {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"}, + {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"}, + {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"}, + {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"}, + {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = { + {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"}, + {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"}, + {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"}, + {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"}, + {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"}, + {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"}, + {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"}, + {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = { + {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = { + {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"}, + {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"}, + {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"}, + {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = { + {AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"}, + {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"}, + {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"}, + {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"}, + {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"}, + {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"}, + {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"}, + {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"}, + {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"}, + {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"}, + {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"}, + {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"}, + {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"}, + {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"}, + {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"}, + {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"}, + {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"}, + {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"}, + {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"}, + {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"}, + {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"}, + {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"}, + {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = { + {AMDGPU_GFX_SQ_SGPR_MEM0, 
"SQ_SGPR_MEM0"}, + {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"}, + {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"}, + {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = { + {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"}, + {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"}, + {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"}, + {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"}, + {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = { + {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = { + {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = { + {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = { + {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"}, + {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"}, + {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"}, + {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"}, + {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"}, + {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"}, + {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"}, + {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"}, + {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"}, + {AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"}, + {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"}, + {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"}, + {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"}, + {AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"}, + {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"}, + {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"}, + {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"}, + {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"}, + {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"}, + {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"}, + {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"}, + {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"}, + {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"}, + {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"}, + {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = { + {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"}, + {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"}, + {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = { + {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"}, + {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"}, + {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"}, + {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"}, + {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"}, + {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"}, + {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"}, + {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"}, + {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"}, + {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"}, + {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"}, + {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"}, + {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"}, + {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"}, + {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"}, + {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"}, + {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"}, + {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"}, + {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"}, + {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"}, + {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"}, + {AMDGPU_GFX_TCX_FIFOA5, 
"TCX_FIFOA5"}, + {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"}, + {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"}, + {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"}, + {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"}, + {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"}, + {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"}, + {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"}, + {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"}, + {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"}, + {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"}, + {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"}, + {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"}, + {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"}, + {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"}, + {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"}, + {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"}, + {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"}, + {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"}, + {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"}, + {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"}, + {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"}, + {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"}, + {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"}, + {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"}, + {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"}, + {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"}, + {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"}, + {AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"}, + {AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"}, + {AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"}, + {AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"}, + {AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"}, + {AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"}, + {AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"}, + {AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"}, + {AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"}, + {AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"}, + {AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"}, + {AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"}, + {AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"}, + {AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"}, + {AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"}, + {AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"}, + {AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"}, + {AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"}, + {AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"}, + {AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"}, + {AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"}, + {AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"}, + {AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"}, + {AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = { + {AMDGPU_GFX_ATC_L2_MEM, "ATC_L2_MEM"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = { + {AMDGPU_GFX_UTCL2_MEM, 
"UTCL2_MEM"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = { + {AMDGPU_GFX_VML2_MEM, "VML2_MEM"}, +}; + +static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = { + {AMDGPU_GFX_VML2_WALKER_MEM, "VML2_WALKER_MEM"}, +}; + +static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = { + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list) + AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list) +}; + +static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = { + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, + AMDGPU_GFX_RLC_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, + AMDGPU_GFX_CP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, + AMDGPU_GFX_CP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, + AMDGPU_GFX_CP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, + AMDGPU_GFX_GDS_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, + AMDGPU_GFX_GC_CANE_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, + AMDGPU_GFX_SPI_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, + AMDGPU_GFX_SP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, + AMDGPU_GFX_SP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, + AMDGPU_GFX_SQ_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI), + 5, 
(AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, + AMDGPU_GFX_SQC_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI), + 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, + AMDGPU_GFX_TCX_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI), + 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, + AMDGPU_GFX_TCC_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, + AMDGPU_GFX_TA_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG), + 31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, + AMDGPU_GFX_TCI_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, + AMDGPU_GFX_TCP_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, + AMDGPU_GFX_TD_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI), + 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, + AMDGPU_GFX_GCEA_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, + AMDGPU_GFX_LDS_MEM, 1}, +}; + +static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, + AMDGPU_GFX_RLC_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, + AMDGPU_GFX_CP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, + AMDGPU_GFX_CP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, + AMDGPU_GFX_CP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, + AMDGPU_GFX_GDS_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, + AMDGPU_GFX_GC_CANE_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI), + 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, + AMDGPU_GFX_SPI_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, + AMDGPU_GFX_SP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, + AMDGPU_GFX_SP_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, + AMDGPU_GFX_SQ_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI), + 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, + 
AMDGPU_GFX_SQC_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI), + 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, + AMDGPU_GFX_TCX_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI), + 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, + AMDGPU_GFX_TCC_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, + AMDGPU_GFX_TA_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG), + 31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, + AMDGPU_GFX_TCI_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, + AMDGPU_GFX_TCP_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, + AMDGPU_GFX_TD_MEM, 8}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI), + 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"}, + AMDGPU_GFX_TCA_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI), + 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, + AMDGPU_GFX_GCEA_MEM, 1}, + {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI), + 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, + AMDGPU_GFX_LDS_MEM, 1}, +}; + static const struct soc15_reg_entry gfx_v9_4_3_ea_err_status_regs = { SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 }; From bfa84da6185cb1897fcee0ac3815625d162d39f0 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Fri, 17 Mar 2023 17:13:46 +0800 Subject: [PATCH 570/861] drm/amdgpu: add RAS error count query for gfx_v9_4_3 Query GFX RAS ce/ue count. 
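Here "ce/ue" are the correctable and uncorrectable error counts. The query walks the gfx_v9_4_3_ce_reg_list/gfx_v9_4_3_ue_reg_list tables once per XCC, selecting each shader-engine/register-instance pair before reading the LO/HI status register pair and mapping the reported memory IDs back to the names in the tables above. Reduced to a minimal sketch (the reg_ent type and read_err_count() helper are illustrative stand-ins, not driver symbols):

    struct reg_ent { int se_num; int reg_inst; };

    unsigned long read_err_count(const struct reg_ent *e, int se, int inst);

    static unsigned long count_errors(const struct reg_ent *list, int n)
    {
            unsigned long total = 0;
            int i, se, inst;

            /* visit every register instance of every list entry */
            for (i = 0; i < n; i++)
                    for (se = 0; se < list[i].se_num; se++)
                            for (inst = 0; inst < list[i].reg_inst; inst++)
                                    total += read_err_count(&list[i], se, inst);
            return total;
    }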
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 56 +++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 188b4d9a2cbb..bfd041ba51d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -3724,6 +3724,55 @@ static const struct soc15_reg_entry gfx_v9_4_3_ea_err_status_regs = { SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 }; +static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, + void *ras_error_status, int xcc_id) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + unsigned long ce_count = 0, ue_count = 0; + uint32_t i, j, k; + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { + for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { + /* no need to select if instance number is 1 */ + if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || + gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) + gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); + + amdgpu_ras_inst_query_ras_error_count(adev, + &(gfx_v9_4_3_ce_reg_list[i].reg_entry), + 1, + gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent, + gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size, + GET_INST(GC, xcc_id), + AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, + &ce_count); + + amdgpu_ras_inst_query_ras_error_count(adev, + &(gfx_v9_4_3_ue_reg_list[i].reg_entry), + 1, + gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, + gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, + GET_INST(GC, xcc_id), + AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, + &ue_count); + } + } + } + + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); + + /* the caller should make sure initialize value of + * err_data->ue_count and err_data->ce_count + */ + err_data->ce_count += ce_count; + err_data->ue_count += ue_count; +} + static void gfx_v9_4_3_inst_query_ea_err_status(struct amdgpu_device *adev, int xcc_id) { @@ -3826,6 +3875,13 @@ static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev, gfx_v9_4_3_inst_reset_ea_err_status(adev, xcc_id); } +static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + amdgpu_gfx_ras_error_func(adev, ras_error_status, + gfx_v9_4_3_inst_query_ras_err_count); +} + static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev) { amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status); From 30feef0676092bdb4b8697e68b8d5864d54f096f Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 8 Feb 2023 14:54:01 +0800 Subject: [PATCH 571/861] drm/amdgpu: add RAS error count reset for gfx_v9_4_3 Add GFX RAS error count reset function. v2: remove xcp operation. only select_se_sh when instance number is more than 1. v3: add check for se_num before select_se_sh. change instance from 0 to xcc_id for register access. 
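The v2/v3 notes amount to the guard visible in the hunk below: a shader engine/instance is selected only when there is more than one to choose from,

    if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
        gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
            gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

and every register access goes through GET_INST(GC, xcc_id) instead of a hard-coded instance 0, so the reset is applied to the intended XCC.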
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 38 +++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index bfd041ba51d6..ac5270d5eff4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -3773,6 +3773,39 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, err_data->ue_count += ue_count; } +static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, + void *ras_error_status, int xcc_id) +{ + uint32_t i, j, k; + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { + for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { + /* no need to select if instance number is 1 */ + if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || + gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) + gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); + + amdgpu_ras_inst_reset_ras_error_count(adev, + &(gfx_v9_4_3_ce_reg_list[i].reg_entry), + 1, + GET_INST(GC, xcc_id)); + + amdgpu_ras_inst_reset_ras_error_count(adev, + &(gfx_v9_4_3_ue_reg_list[i].reg_entry), + 1, + GET_INST(GC, xcc_id)); + } + } + } + + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); +} + static void gfx_v9_4_3_inst_query_ea_err_status(struct amdgpu_device *adev, int xcc_id) { @@ -3882,6 +3915,11 @@ static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev, gfx_v9_4_3_inst_query_ras_err_count); } +static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev) +{ + amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count); +} + static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev) { amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status); From 0386d52d1516d80b81a25552df74b8a82dfb77f3 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Fri, 10 Feb 2023 18:41:28 +0800 Subject: [PATCH 572/861] drm/amdgpu: add sq timeout status functions for gfx_v9_4_3 Query and reset sq timeout status. v2: change instance from 0 to xcc_id for register access. 
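The status register is read for logging and then explicitly cleared, so each query reports only the watchdog hits that arrived since the previous pass. Condensed from the hunks below:

    status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_STATUS);
    if (status)
            gfx_v9_4_3_log_cu_timeout_status(adev, status, xcc_id);
    /* clear old status */
    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_STATUS, 0);

The logger decodes each set status bit into a SIMD/wave pair and dumps that wave's STATUS, PC, EXEC, INST and IB_STS registers via wave_read_ind().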
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 98 +++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index ac5270d5eff4..5bd2f40a817e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -3865,11 +3865,87 @@ static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev, } } +static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev, + uint32_t status, int xcc_id) +{ + struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + uint32_t i, simd, wave; + uint32_t wave_status; + uint32_t wave_pc_lo, wave_pc_hi; + uint32_t wave_exec_lo, wave_exec_hi; + uint32_t wave_inst_dw0, wave_inst_dw1; + uint32_t wave_ib_sts; + + for (i = 0; i < 32; i++) { + if (!((i << 1) & status)) + continue; + + simd = i / cu_info->max_waves_per_simd; + wave = i % cu_info->max_waves_per_simd; + + wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS); + wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO); + wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI); + wave_exec_lo = + wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO); + wave_exec_hi = + wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI); + wave_inst_dw0 = + wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0); + wave_inst_dw1 = + wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1); + wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS); + + dev_info( + adev->dev, + "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n", + simd, wave, wave_status, + ((uint64_t)wave_pc_hi << 32 | wave_pc_lo), + ((uint64_t)wave_exec_hi << 32 | wave_exec_lo), + ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0), + wave_ib_sts); + } +} + +static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev, + int xcc_id) +{ + uint32_t se_idx, sh_idx, cu_idx; + uint32_t status; + + mutex_lock(&adev->grbm_idx_mutex); + for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) { + for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) { + for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) { + gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx, + cu_idx, xcc_id); + status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), + regSQ_TIMEOUT_STATUS); + if (status != 0) { + dev_info( + adev->dev, + "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n", + se_idx, sh_idx, cu_idx); + gfx_v9_4_3_log_cu_timeout_status( + adev, status, xcc_id); + } + /* clear old status */ + WREG32_SOC15(GC, GET_INST(GC, xcc_id), + regSQ_TIMEOUT_STATUS, 0); + } + } + } + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); +} + static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev, void *ras_error_status, int xcc_id) { gfx_v9_4_3_inst_query_ea_err_status(adev, xcc_id); gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id); + gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id); } static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev, @@ -3901,11 +3977,33 @@ static void gfx_v9_4_3_inst_reset_ea_err_status(struct amdgpu_device *adev, mutex_unlock(&adev->grbm_idx_mutex); } +static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev, + int xcc_id) +{ + uint32_t se_idx, 
sh_idx, cu_idx; + + mutex_lock(&adev->grbm_idx_mutex); + for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) { + for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) { + for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) { + gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx, + cu_idx, xcc_id); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), + regSQ_TIMEOUT_STATUS, 0); + } + } + } + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); +} + static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev, void *ras_error_status, int xcc_id) { gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id); gfx_v9_4_3_inst_reset_ea_err_status(adev, xcc_id); + gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id); } static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev, From 92ecb92ccc839c4c4b51ab1025cde5dd82c2fb4b Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 8 Feb 2023 17:05:08 +0800 Subject: [PATCH 573/861] drm/amdgpu: initialize RAS for gfx_v9_4_3 Register GFX RAS functions and initialize GFX RAS. v2: remove xcp operations. v3: reuse the return value of gfx_ras_sw_init. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 5bd2f40a817e..e5cfb3adb3b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -47,6 +47,8 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L +struct amdgpu_gfx_ras gfx_v9_4_3_ras; + static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev); @@ -659,6 +661,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) u32 gb_addr_config; adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs; + adev->gfx.ras = &gfx_v9_4_3_ras; switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 3): @@ -845,7 +848,7 @@ static int gfx_v9_4_3_sw_init(void *handle) if (r) return r; - return 0; + return amdgpu_gfx_ras_sw_init(adev); } static int gfx_v9_4_3_sw_fini(void *handle) @@ -4342,3 +4345,16 @@ struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = { .suspend = &gfx_v9_4_3_xcp_suspend, .resume = &gfx_v9_4_3_xcp_resume }; + +struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = { + .query_ras_error_count = &gfx_v9_4_3_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count, + .query_ras_error_status = &gfx_v9_4_3_query_ras_error_status, + .reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status, +}; + +struct amdgpu_gfx_ras gfx_v9_4_3_ras = { + .ras_block = { + .hw_ops = &gfx_v9_4_3_ras_ops, + }, +}; From 45b3a914d40e63d2c9e3a3e02fb2014be975b9b0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 16 May 2023 17:16:30 -0400 Subject: [PATCH 574/861] drm/amdgpu/gmc9: fix 64 bit division in partition code Rework logic or use do_div() to avoid problems on 32 bit. 
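On 32-bit kernels a plain '/' on a u64 is lowered to a libgcc helper (__udivdi3) that the kernel deliberately does not provide, so 64-bit divides must go through do_div(). do_div(n, base) divides n in place by a 32-bit base and evaluates to the remainder; for illustration only:

    #include <asm/div64.h>

    u64 bytes = 10737418240ULL;     /* 10 GiB */
    u32 rem = do_div(bytes, 3);     /* bytes = quotient, rem = remainder */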
v2: add a missing case for XCP macro v3: fix out of bounds array access v4: fix xcp handling harder Acked-by: Guchun Chen (v1) Reviewed-by: Mukul Joshi (v3) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 15 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 9 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++----- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++++++-- 5 files changed, 35 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 739eb7c0d133..5de92c9ab18f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -794,3 +794,18 @@ void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev) { kgd2kfd_unlock_kfd(); } + + +u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id) +{ + u64 tmp; + s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id); + + if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) { + tmp = adev->gmc.mem_partitions[mem_id].size; + do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition); + return tmp; + } else { + return adev->gmc.real_vram_size; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index be43d71ba7ef..94cc456761e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -333,15 +333,14 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag, int8_t xcp_id); +u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id); + #define KFD_XCP_MEM_ID(adev, xcp_id) \ ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) -#define KFD_XCP_MEMORY_SIZE(adev, xcp_id)\ - ((adev)->gmc.num_mem_partitions && (xcp_id) >= 0 ?\ - (adev)->gmc.mem_partitions[KFD_XCP_MEM_ID((adev), (xcp_id))].size /\ - (adev)->xcp_mgr->num_xcp_per_mem_partition :\ - (adev)->gmc.real_vram_size) +#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id)) + #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 23101c82519a..902773ce41b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -814,11 +814,14 @@ static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev, struct amdgpu_ttm_tt *gtt = (void *)ttm; uint64_t total_pages = ttm->num_pages; int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp); - uint64_t page_idx, pages_per_xcc = total_pages / num_xcc; + uint64_t page_idx, pages_per_xcc; int i; uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); + pages_per_xcc = total_pages; + do_div(pages_per_xcc, num_xcc); + for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) { /* MQD page: use default flags */ amdgpu_gart_bind(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7ea80bdf8e1e..f70e666cecf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1914,9 +1914,10 @@ gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, adev->gmc.num_mem_partitions = num_ranges; /* If there is only partition, don't use entire size */ - if 
(adev->gmc.num_mem_partitions == 1) - mem_ranges[0].size = - (mem_ranges[0].size * (mem_groups - 1) / mem_groups); + if (adev->gmc.num_mem_partitions == 1) { + mem_ranges[0].size = mem_ranges[0].size * (mem_groups - 1); + do_div(mem_ranges[0].size, mem_groups); + } } static void @@ -1948,8 +1949,8 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, break; } - size = (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) / - adev->gmc.num_mem_partitions; + size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT; + size /= adev->gmc.num_mem_partitions; for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { mem_ranges[i].range.fpfn = start_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 206851c9e642..b0f0d31bf3e6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1939,10 +1939,14 @@ void svm_range_set_max_pages(struct amdgpu_device *adev) uint64_t max_pages; uint64_t pages, _pages; uint64_t min_pages = 0; - int i; + int i, id; for (i = 0; i < adev->kfd.dev->num_nodes; i++) { - pages = KFD_XCP_MEMORY_SIZE(adev, adev->kfd.dev->nodes[i]->xcp->id) >> 17; + if (adev->kfd.dev->nodes[i]->xcp) + id = adev->kfd.dev->nodes[i]->xcp->id; + else + id = -1; + pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17; pages = clamp(pages, 1ULL << 9, 1ULL << 18); pages = rounddown_pow_of_two(pages); min_pages = min_not_zero(min_pages, pages); From 0ce50b2efe08e56224d11d735310b353e0e4e222 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 16 May 2023 16:56:49 -0400 Subject: [PATCH 575/861] drm/amdgpu/vcn4: fix endian conversion sq.is_enabled is a byte so there is no need to endian swap it. Acked-by: Guchun Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index c77ceaf53dcd..339842382a1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -124,7 +124,7 @@ static int vcn_v4_0_3_sw_init(void *handle) fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); - fw_shared->sq.is_enabled = cpu_to_le32(true); + fw_shared->sq.is_enabled = true; if (amdgpu_vcnfw_log) amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); From 01c3f464743b64e6e65cb9bad951458986819a42 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 17 May 2023 20:10:48 +0530 Subject: [PATCH 576/861] drm/amd/amdgpu: Fix errors & warnings in amdgpu_ttm.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below checkpatch insisted error & warnings: ERROR: Macros with complex values should be enclosed in parentheses WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: braces {} are not necessary for single statement blocks WARNING: Block comments use a trailing */ on a separate line WARNING: Missing a blank line after declarations Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Acked-by: Luben Tuikov Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 902773ce41b5..b8adcebb0d89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -65,7 +65,7 @@ 
MODULE_IMPORT_NS(DMA_BUF); -#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 +#define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128) static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, struct ttm_tt *ttm, @@ -184,11 +184,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct amdgpu_res_cursor *mm_cur, - unsigned window, struct amdgpu_ring *ring, + unsigned int window, struct amdgpu_ring *ring, bool tmz, uint64_t *size, uint64_t *addr) { struct amdgpu_device *adev = ring->adev; - unsigned offset, num_pages, num_dw, num_bytes; + unsigned int offset, num_pages, num_dw, num_bytes; uint64_t src_addr, dst_addr; struct amdgpu_job *job; void *cpu_addr; @@ -1060,9 +1060,9 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, enum ttm_caching caching; gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); - if (gtt == NULL) { + if (!gtt) return NULL; - } + gtt->gobj = &bo->base; if (adev->gmc.mem_partitions && abo->xcp_id >= 0) gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id); @@ -1847,9 +1847,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) *place on the VRAM, so reserve it early. */ r = amdgpu_ttm_fw_reserve_vram_init(adev); - if (r) { + if (r) return r; - } /* *The reserved vram for driver must be pinned to the specified @@ -1873,7 +1872,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* allocate memory as required for VGA * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS - * and driver. */ + * and driver. + */ if (!adev->gmc.is_app_apu) { r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size, @@ -1902,7 +1902,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } DRM_INFO("amdgpu: %uM of VRAM memory ready\n", - (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); + (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024))); /* Compute GTT size, either based on TTM limit * or whatever the user passed on module init. 
@@ -1919,7 +1919,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } DRM_INFO("amdgpu: %uM of GTT memory ready.\n", - (unsigned)(gtt_size / (1024 * 1024))); + (unsigned int)(gtt_size / (1024 * 1024))); /* Initialize preemptible memory pool */ r = amdgpu_preempt_mgr_init(adev); @@ -1961,6 +1961,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) void amdgpu_ttm_fini(struct amdgpu_device *adev) { int idx; + if (!adev->mman.initialized) return; @@ -2089,10 +2090,10 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, bool vm_needs_flush, bool tmz) { struct amdgpu_device *adev = ring->adev; - unsigned num_loops, num_dw; + unsigned int num_loops, num_dw; struct amdgpu_job *job; uint32_t max_bytes; - unsigned i; + unsigned int i; int r; if (!direct_submit && !ring->sched.ready) { From f2cd6b26922e68ffafd14a9128e20630296e430d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 17 May 2023 15:04:35 -0400 Subject: [PATCH 577/861] drm/amdkfd: fix stack size in svm_range_validate_and_map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allocate large local variable on heap to avoid exceeding the stack size: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c: In function ‘svm_range_validate_and_map’: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c:1690:1: warning: the frame size of 2360 bytes is larger than 2048 bytes [-Wframe-larger-than=] Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 56 ++++++++++++++++------------ 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index b0f0d31bf3e6..ee16130ddc75 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1554,48 +1554,54 @@ static int svm_range_validate_and_map(struct mm_struct *mm, struct svm_range *prange, int32_t gpuidx, bool intr, bool wait, bool flush_tlb) { - struct svm_validate_context ctx; + struct svm_validate_context *ctx; unsigned long start, end, addr; struct kfd_process *p; void *owner; int32_t idx; int r = 0; - ctx.process = container_of(prange->svms, struct kfd_process, svms); - ctx.prange = prange; - ctx.intr = intr; + ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + ctx->process = container_of(prange->svms, struct kfd_process, svms); + ctx->prange = prange; + ctx->intr = intr; if (gpuidx < MAX_GPU_INSTANCE) { - bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE); - bitmap_set(ctx.bitmap, gpuidx, 1); - } else if (ctx.process->xnack_enabled) { - bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); + bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE); + bitmap_set(ctx->bitmap, gpuidx, 1); + } else if (ctx->process->xnack_enabled) { + bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); /* If prefetch range to GPU, or GPU retry fault migrate range to * GPU, which has ACCESS attribute to the range, create mapping * on that GPU. 
*/ if (prange->actual_loc) { - gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process, + gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process, prange->actual_loc); if (gpuidx < 0) { WARN_ONCE(1, "failed get device by id 0x%x\n", prange->actual_loc); - return -EINVAL; + r = -EINVAL; + goto free_ctx; } if (test_bit(gpuidx, prange->bitmap_access)) - bitmap_set(ctx.bitmap, gpuidx, 1); + bitmap_set(ctx->bitmap, gpuidx, 1); } } else { - bitmap_or(ctx.bitmap, prange->bitmap_access, + bitmap_or(ctx->bitmap, prange->bitmap_access, prange->bitmap_aip, MAX_GPU_INSTANCE); } - if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) { - if (!prange->mapped_to_gpu) - return 0; + if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { + if (!prange->mapped_to_gpu) { + r = 0; + goto free_ctx; + } - bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); + bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); } if (prange->actual_loc && !prange->ttm_res) { @@ -1603,15 +1609,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm, * svm_migrate_ram_to_vram after allocating a BO. */ WARN_ONCE(1, "VRAM BO missing during validation\n"); - return -EINVAL; + r = -EINVAL; + goto free_ctx; } - svm_range_reserve_bos(&ctx); + svm_range_reserve_bos(ctx); p = container_of(prange->svms, struct kfd_process, svms); - owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap, + owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap, MAX_GPU_INSTANCE)); - for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) { + for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) { if (kfd_svm_page_owner(p, idx) != owner) { owner = NULL; break; @@ -1648,7 +1655,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } offset = (addr - start) >> PAGE_SHIFT; - r = svm_range_dma_map(prange, ctx.bitmap, offset, npages, + r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, hmm_range->hmm_pfns); if (r) { pr_debug("failed %d to dma map range\n", r); @@ -1668,7 +1675,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } r = svm_range_map_to_gpus(prange, offset, npages, readonly, - ctx.bitmap, wait, flush_tlb); + ctx->bitmap, wait, flush_tlb); unlock_out: svm_range_unlock(prange); @@ -1682,11 +1689,14 @@ unlock_out: } unreserve_out: - svm_range_unreserve_bos(&ctx); + svm_range_unreserve_bos(ctx); if (!r) prange->validate_timestamp = ktime_get_boottime(); +free_ctx: + kfree(ctx); + return r; } From 29f187f71e7fd4e4516f235c400e457c50e2ab70 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 17 May 2023 18:38:00 +0530 Subject: [PATCH 578/861] drm/amd/amdgpu: Fix warnings in amdgpu_encoders.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below checkpatch warnings: WARNING: Missing a blank line after declarations + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices; WARNING: Prefer 'unsigned int' to bare use of 'unsigned' Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Acked-by: Luben Tuikov Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index 27a782a9dc72..3aaeed2d3562 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -70,6 +70,7 @@ void 
amdgpu_encoder_set_active_device(struct drm_encoder *encoder) drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices; DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n", amdgpu_encoder->active_device, amdgpu_encoder->devices, @@ -165,12 +166,12 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder, { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; - unsigned hblank = native_mode->htotal - native_mode->hdisplay; - unsigned vblank = native_mode->vtotal - native_mode->vdisplay; - unsigned hover = native_mode->hsync_start - native_mode->hdisplay; - unsigned vover = native_mode->vsync_start - native_mode->vdisplay; - unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start; - unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start; + unsigned int hblank = native_mode->htotal - native_mode->hdisplay; + unsigned int vblank = native_mode->vtotal - native_mode->vdisplay; + unsigned int hover = native_mode->hsync_start - native_mode->hdisplay; + unsigned int vover = native_mode->vsync_start - native_mode->vdisplay; + unsigned int hsync_width = native_mode->hsync_end - native_mode->hsync_start; + unsigned int vsync_width = native_mode->vsync_end - native_mode->vsync_start; adjusted_mode->clock = native_mode->clock; adjusted_mode->flags = native_mode->flags; From 5d0622705ff76e017b32cb763cbc7b00694f3b92 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 17 May 2023 20:31:02 +0530 Subject: [PATCH 579/861] drm/amd/amdgpu: Fix errors & warnings in amdgpu_vcn.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below checkpatch insisted error & warnings: ERROR: space required before the open brace '{' WARNING: braces {} are not necessary for any arm of this statement + if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) { [...] + } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) { [...] + } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) { [...] ERROR: code indent should use tabs where possible WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: braces {} are not necessary for single statement blocks + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); + ERROR: space required before the open parenthesis '(' WARNING: Missing a blank line after declarations WARNING: please, no spaces at the start of a line WARNING: Symbolic permissions 'S_IRUGO' are not preferred. Consider using octal permissions '0444'. 
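For reference, (S_IFREG | S_IRUGO) and (S_IFREG | 0444) are bit-for-bit identical: S_IRUGO expands to S_IRUSR | S_IRGRP | S_IROTH = 0400 | 0040 | 0004 = 0444. checkpatch simply prefers the literal octal spelling.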
Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Acked-by: Luben Tuikov Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 35 ++++++++++++------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 06ec2dc55857..c088111c2321 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -169,7 +169,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); - if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)){ + if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) { fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)); log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log); } else { @@ -276,20 +276,19 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t bool ret = false; int vcn_config = adev->vcn.vcn_config[vcn_instance]; - if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) { + if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) ret = true; - } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) { + else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) ret = true; - } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) { + else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) ret = true; - } return ret; } int amdgpu_vcn_suspend(struct amdgpu_device *adev) { - unsigned size; + unsigned int size; void *ptr; int i, idx; @@ -318,7 +317,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) int amdgpu_vcn_resume(struct amdgpu_device *adev) { - unsigned size; + unsigned int size; void *ptr; int i, idx; @@ -340,7 +339,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) adev->vcn.inst[i].saved_bo = NULL; } else { const struct common_firmware_header *hdr; - unsigned offset; + unsigned int offset; hdr = (const struct common_firmware_header *)adev->vcn.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -371,9 +370,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) if (adev->vcn.harvest_config & (1 << j)) continue; - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + for (i = 0; i < adev->vcn.num_enc_rings; ++i) fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); - } if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { struct dpg_pause_state new_state; @@ -460,7 +458,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; uint32_t tmp = 0; - unsigned i; + unsigned int i; int r; /* VCN in SRIOV does not support direct register read/write */ @@ -797,7 +795,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; uint32_t rptr; - unsigned i; + unsigned int i; int r; if (amdgpu_sriov_vf(adev)) @@ -1012,7 +1010,7 @@ error: enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring) { - switch(ring) { + switch (ring) { case 0: return AMDGPU_RING_PRIO_0; case 1: @@ -1031,6 +1029,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev) if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { const struct common_firmware_header *hdr; + hdr = (const struct common_firmware_header 
*)adev->vcn.fw->data; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { @@ -1059,7 +1058,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev) */ #if defined(CONFIG_DEBUG_FS) static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) + size_t size, loff_t *pos) { struct amdgpu_vcn_inst *vcn; void *log_buf; @@ -1105,7 +1104,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf, if (read_pos == AMDGPU_VCNFW_LOG_SIZE) read_pos = plog->header_size; if (read_num[i] == copy_to_user((buf + read_bytes), - (log_buf + read_pos), read_num[i])) + (log_buf + read_pos), read_num[i])) return -EFAULT; read_bytes += read_num[i]; @@ -1126,7 +1125,7 @@ static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = { #endif void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i, - struct amdgpu_vcn_inst *vcn) + struct amdgpu_vcn_inst *vcn) { #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev_to_drm(adev)->primary; @@ -1134,7 +1133,7 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i, char name[32]; sprintf(name, "amdgpu_vcn_%d_fwlog", i); - debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn, + debugfs_create_file_size(name, S_IFREG | 0444, root, vcn, &amdgpu_debugfs_vcnfwlog_fops, AMDGPU_VCNFW_LOG_SIZE); #endif @@ -1148,7 +1147,7 @@ void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn) uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size; volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr; volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr - + vcn->fw_shared.log_offset; + + vcn->fw_shared.log_offset; *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG); fw_log->is_enabled = 1; fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF); From 6c47a79b3b8ba91faf89f9866da2ec16aac979e7 Mon Sep 17 00:00:00 2001 From: YiPeng Chai Date: Tue, 16 May 2023 17:34:17 +0800 Subject: [PATCH 580/861] drm/amdgpu: perform mode2 reset for sdma fed error on gfx v11_0_3 perform mode2 reset for sdma fed error on gfx v11_0_3. 
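Concretely: when the RLC FED interrupt arrives with vmid and pasid both zero, the poison consumption handler now reads RLC_RLCS_FED_STATUS_0 and, if either SDMA engine reports a FED (fatal error detected) condition, tags the RAS context before requesting recovery:

    if (REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA0_FED_ERR) ||
        REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA1_FED_ERR))
            ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;

The recovery worker consumes that flag and downgrades the reset method to AMD_RESET_METHOD_MODE2, a lighter reset than the full GPU reset used for other poison cases.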
Signed-off-by: YiPeng Chai Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 8 +++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 5 +++++ drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c | 14 +++++++++++++- 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 6bb438642cc0..f2da69adcd9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2053,9 +2053,15 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) /* Perform full reset in fatal error mode */ if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - else + else { clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) { + ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET; + reset_context.method = AMD_RESET_METHOD_MODE2; + } + } + amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); } atomic_set(&ras->in_recovery, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index bc43f7db17cc..46bf1889a9d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -339,6 +339,8 @@ enum amdgpu_ras_ret { #define AMDGPU_RAS_ERR_STATUS_VALID (1 << 1) #define AMDGPU_RAS_ERR_ADDRESS_VALID (1 << 2) +#define AMDGPU_RAS_GPU_RESET_MODE2_RESET (0x1 << 0) + struct amdgpu_ras_err_status_reg_entry { uint32_t hwip; uint32_t ip_inst; @@ -427,6 +429,9 @@ struct amdgpu_ras { /* Indicates smu whether need update bad channel info */ bool update_channel_flag; + + /* Record special requirements of gpu reset caller */ + uint32_t gpu_reset_flags; }; struct ras_fs_data { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c index 068b9586a223..26d6286d86c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c @@ -84,8 +84,20 @@ static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev, /* Workaround: when vmid and pasid are both zero, trigger gpu reset in KGD. 
*/ if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) && (entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) && - !entry->vmid && !entry->pasid) + !entry->vmid && !entry->pasid) { + uint32_t rlc_status0 = 0; + + rlc_status0 = RREG32_SOC15(GC, 0, regRLC_RLCS_FED_STATUS_0); + + if (REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA0_FED_ERR) || + REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA1_FED_ERR)) { + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; + } + amdgpu_ras_reset_gpu(adev); + } return 0; } From f10984a353c87e696872de8f9614a58689deb0d0 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 17 May 2023 21:24:43 +0530 Subject: [PATCH 581/861] drm/amd/amdgpu: Fix errors & warnings in amdgpu _uvd, _vce.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below checkpatch errors & warnings: In amdgpu_uvd.c: WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: Prefer 'unsigned int *' to bare use of 'unsigned *' WARNING: Missing a blank line after declarations WARNING: %Lx is non-standard C, use %llx ERROR: space required before the open parenthesis '(' ERROR: space required before the open brace '{' WARNING: %LX is non-standard C, use %llX WARNING: Block comments use * on subsequent lines +/* multiple fence commands without any stream commands in between can + crash the vcpu so just try to emmit a dummy create/destroy msg to WARNING: Block comments use a trailing */ on a separate line + avoid this */ WARNING: braces {} are not necessary for single statement blocks + for (j = 0; j < adev->uvd.num_enc_rings; ++j) { + fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); + } In amdgpu_vce.c: WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: Missing a blank line after declarations WARNING: %Lx is non-standard C, use %llx WARNING: Possible repeated word: 'we' ERROR: space required before the open parenthesis '(' Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Acked-by: Luben Tuikov Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 83 +++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 39 ++++++------ 2 files changed, 63 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 6887109abb13..b7441654e6fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -96,16 +96,16 @@ */ struct amdgpu_uvd_cs_ctx { struct amdgpu_cs_parser *parser; - unsigned reg, count; - unsigned data0, data1; - unsigned idx; + unsigned int reg, count; + unsigned int data0, data1; + unsigned int idx; struct amdgpu_ib *ib; /* does the IB has a msg command */ bool has_msg_cmd; /* minimum buffer sizes */ - unsigned *buf_sizes; + unsigned int *buf_sizes; }; #ifdef CONFIG_DRM_AMDGPU_SI @@ -186,7 +186,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; - unsigned family_id; + unsigned int family_id; int i, j, r; INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); @@ -275,7 +275,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) family_id = le32_to_cpu(hdr->ucode_version) & 0xff; if (adev->asic_type < CHIP_VEGA20) { - unsigned version_major, version_minor; + unsigned int version_major, version_minor; version_major = 
(le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; @@ -420,7 +420,7 @@ int amdgpu_uvd_entity_init(struct amdgpu_device *adev) int amdgpu_uvd_suspend(struct amdgpu_device *adev) { - unsigned size; + unsigned int size; void *ptr; int i, j, idx; bool in_ras_intr = amdgpu_ras_intr_triggered(); @@ -469,7 +469,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) int amdgpu_uvd_resume(struct amdgpu_device *adev) { - unsigned size; + unsigned int size; void *ptr; int i, idx; @@ -491,7 +491,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) adev->uvd.inst[i].saved_bo = NULL; } else { const struct common_firmware_header *hdr; - unsigned offset; + unsigned int offset; hdr = (const struct common_firmware_header *)adev->uvd.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -542,6 +542,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo) { int i; + for (i = 0; i < abo->placement.num_placement; ++i) { abo->placements[i].fpfn = 0 >> PAGE_SHIFT; abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; @@ -579,7 +580,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx) r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping); if (r) { - DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr); + DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr); return r; } @@ -589,6 +590,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx) if (cmd == 0x0 || cmd == 0x3) { /* yes, force it into VRAM */ uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM; + amdgpu_bo_placement_from_domain(bo, domain); } amdgpu_uvd_force_into_uvd_segment(bo); @@ -609,21 +611,21 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx) * Peek into the decode message and calculate the necessary buffer sizes. 
*/ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, - unsigned buf_sizes[]) + unsigned int buf_sizes[]) { - unsigned stream_type = msg[4]; - unsigned width = msg[6]; - unsigned height = msg[7]; - unsigned dpb_size = msg[9]; - unsigned pitch = msg[28]; - unsigned level = msg[57]; + unsigned int stream_type = msg[4]; + unsigned int width = msg[6]; + unsigned int height = msg[7]; + unsigned int dpb_size = msg[9]; + unsigned int pitch = msg[28]; + unsigned int level = msg[57]; - unsigned width_in_mb = width / 16; - unsigned height_in_mb = ALIGN(height / 16, 2); - unsigned fs_in_mb = width_in_mb * height_in_mb; + unsigned int width_in_mb = width / 16; + unsigned int height_in_mb = ALIGN(height / 16, 2); + unsigned int fs_in_mb = width_in_mb * height_in_mb; - unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; - unsigned min_ctx_size = ~0; + unsigned int image_size, tmp, min_dpb_size, num_dpb_buffer; + unsigned int min_ctx_size = ~0; image_size = width * height; image_size += image_size / 2; @@ -631,7 +633,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, switch (stream_type) { case 0: /* H264 */ - switch(level) { + switch (level) { case 30: num_dpb_buffer = 8100 / fs_in_mb; break; @@ -709,7 +711,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, break; case 7: /* H264 Perf */ - switch(level) { + switch (level) { case 30: num_dpb_buffer = 8100 / fs_in_mb; break; @@ -742,7 +744,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, /* reference picture buffer */ min_dpb_size = image_size * num_dpb_buffer; - if (!adev->uvd.use_ctx_buf){ + if (!adev->uvd.use_ctx_buf) { /* macroblock context buffer */ min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192; @@ -805,7 +807,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, * Make sure that we don't open up to many sessions. 
*/ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, - struct amdgpu_bo *bo, unsigned offset) + struct amdgpu_bo *bo, unsigned int offset) { struct amdgpu_device *adev = ctx->parser->adev; int32_t *msg, msg_type, handle; @@ -911,7 +913,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping); if (r) { - DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr); + DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr); return r; } @@ -930,7 +932,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) if (cmd < 0x4) { if ((end - start) < ctx->buf_sizes[cmd]) { DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, - (unsigned)(end - start), + (unsigned int)(end - start), ctx->buf_sizes[cmd]); return -EINVAL; } @@ -938,7 +940,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) } else if (cmd == 0x206) { if ((end - start) < ctx->buf_sizes[4]) { DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, - (unsigned)(end - start), + (unsigned int)(end - start), ctx->buf_sizes[4]); return -EINVAL; } @@ -949,14 +951,14 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) if (!ctx->parser->adev->uvd.address_64_bit) { if ((start >> 28) != ((end - 1) >> 28)) { - DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", + DRM_ERROR("reloc %llx-%llx crossing 256MB boundary!\n", start, end); return -EINVAL; } if ((cmd == 0 || cmd == 0x3) && (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { - DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", + DRM_ERROR("msg/fb buffer %llx-%llx out of 256MB segment!\n", start, end); return -EINVAL; } @@ -990,7 +992,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, ctx->idx++; for (i = 0; i <= ctx->count; ++i) { - unsigned reg = ctx->reg + i; + unsigned int reg = ctx->reg + i; if (ctx->idx >= ctx->ib->length_dw) { DRM_ERROR("Register command after end of CS!\n"); @@ -1036,7 +1038,8 @@ static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx, for (ctx->idx = 0 ; ctx->idx < ctx->ib->length_dw; ) { uint32_t cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx); - unsigned type = CP_PACKET_GET_TYPE(cmd); + unsigned int type = CP_PACKET_GET_TYPE(cmd); + switch (type) { case PACKET_TYPE0: ctx->reg = CP_PACKET0_GET_REG(cmd); @@ -1070,7 +1073,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, struct amdgpu_ib *ib) { struct amdgpu_uvd_cs_ctx ctx = {}; - unsigned buf_sizes[] = { + unsigned int buf_sizes[] = { [0x00000000] = 2048, [0x00000001] = 0xFFFFFFFF, [0x00000002] = 0xFFFFFFFF, @@ -1185,8 +1188,9 @@ err_free: } /* multiple fence commands without any stream commands in between can - crash the vcpu so just try to emmit a dummy create/destroy msg to - avoid this */ + * crash the vcpu so just try to emmit a dummy create/destroy msg to + * avoid this + */ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence **fence) { @@ -1252,15 +1256,14 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.idle_work.work); - unsigned fences = 0, i, j; + unsigned int fences = 0, i, j; for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { if (adev->uvd.harvest_config & (1 << i)) continue; fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); - for (j = 0; j < adev->uvd.num_enc_rings; ++j) { + for (j = 0; j < adev->uvd.num_enc_rings; ++j) fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); - } } if (fences 
== 0) { @@ -1356,7 +1359,7 @@ error: */ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev) { - unsigned i; + unsigned int i; uint32_t used_handles = 0; for (i = 0; i < adev->uvd.max_handles; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index e2b7324a70cb..1904edf68407 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -99,7 +99,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) { const char *fw_name; const struct common_firmware_header *hdr; - unsigned ucode_version, version_major, version_minor, binary_id; + unsigned int ucode_version, version_major, version_minor, binary_id; int i, r; switch (adev->asic_type) { @@ -207,7 +207,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) */ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) { - unsigned i; + unsigned int i; if (adev->vce.vcpu_bo == NULL) return 0; @@ -286,7 +286,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) { void *cpu_addr; const struct common_firmware_header *hdr; - unsigned offset; + unsigned int offset; int r, idx; if (adev->vce.vcpu_bo == NULL) @@ -332,7 +332,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, vce.idle_work.work); - unsigned i, count = 0; + unsigned int i, count = 0; for (i = 0; i < adev->vce.num_rings; i++) count += amdgpu_fence_count_emitted(&adev->vce.ring[i]); @@ -409,6 +409,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) { struct amdgpu_ring *ring = &adev->vce.ring[0]; int i, r; + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { uint32_t handle = atomic_read(&adev->vce.handles[i]); @@ -436,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence **fence) { - const unsigned ib_size_dw = 1024; + const unsigned int ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; struct amdgpu_ib ib_msg; @@ -528,7 +529,7 @@ err: static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct dma_fence **fence) { - const unsigned ib_size_dw = 1024; + const unsigned int ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; @@ -596,12 +597,12 @@ err: */ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib, int lo, int hi, - unsigned size, int32_t index) + unsigned int size, int32_t index) { int64_t offset = ((uint64_t)size) * ((int64_t)index); struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_bo_va_mapping *mapping; - unsigned i, fpfn, lpfn; + unsigned int i, fpfn, lpfn; struct amdgpu_bo *bo; uint64_t addr; int r; @@ -619,7 +620,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping); if (r) { - DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n", + DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n", addr, lo, hi, size, index); return r; } @@ -646,7 +647,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, * Patch relocation inside command stream with real buffer address */ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib, - int lo, int hi, unsigned size, uint32_t index) + int lo, int hi, unsigned int size, uint32_t index) { struct 
amdgpu_bo_va_mapping *mapping; struct amdgpu_bo *bo; @@ -662,14 +663,14 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib, r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping); if (r) { - DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n", + DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n", addr, lo, hi, size, index); return r; } if ((addr + (uint64_t)size) > (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) { - DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n", + DRM_ERROR("BO too small for addr 0x%010llx %d %d\n", addr, lo, hi); return -EINVAL; } @@ -692,12 +693,12 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib, * @allocated: allocated a new handle? * * Validates the handle and return the found session index or -EINVAL - * we we don't have another free session index. + * we don't have another free session index. */ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, uint32_t handle, uint32_t *allocated) { - unsigned i; + unsigned int i; /* validate the handle */ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { @@ -735,14 +736,14 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib) { - unsigned fb_idx = 0, bs_idx = 0; + unsigned int fb_idx = 0, bs_idx = 0; int session_idx = -1; uint32_t destroyed = 0; uint32_t created = 0; uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; - unsigned idx; + unsigned int idx; int i, r = 0; job->vm = NULL; @@ -1084,7 +1085,7 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, * */ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) + unsigned int flags) { WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); @@ -1106,7 +1107,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; uint32_t rptr; - unsigned i; + unsigned int i; int r, timeout = adev->usec_timeout; /* skip ring test for sriov*/ @@ -1171,7 +1172,7 @@ error: enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring) { - switch(ring) { + switch (ring) { case 0: return AMDGPU_RING_PRIO_0; case 1: From e03f04b84901644c81b4348a813a8d17facbd277 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Thu, 18 May 2023 07:52:06 +0530 Subject: [PATCH 582/861] drm/amdgpu: Fix warnings in amdgpu _sdma, _ucode.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below checkpatch warnings: WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: Comparisons should place the constant on the right side of the test WARNING: Missing a blank line after declarations Cc: Luben Tuikov Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 231ca06bc9c7..9568adaad5cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -64,7 +64,7 @@ int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index) } uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, - unsigned vmid) + unsigned int vmid) { struct amdgpu_device *adev = ring->adev; uint64_t csa_mc_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index f76b1cb8baf8..16807ff96dc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -748,7 +748,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, const struct imu_firmware_header_v1_0 *imu_hdr = NULL; u8 *ucode_addr; - if (NULL == ucode->fw) + if (!ucode->fw) return 0; ucode->mc_addr = mc_addr; @@ -972,7 +972,7 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, uint8_t *src_addr = NULL; uint8_t *dst_addr = NULL; - if (NULL == ucode->fw) + if (!ucode->fw) return 0; comm_hdr = (const struct common_firmware_header *)ucode->fw->data; @@ -1043,6 +1043,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) if (i == AMDGPU_UCODE_ID_CP_MEC1 && adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { const struct gfx_firmware_header_v1_0 *cp_hdr; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset, adev->firmware.fw_buf_ptr + fw_offset); From e602157ec089240861cd641ee2c7c64eeaec09bf Mon Sep 17 00:00:00 2001 From: Jack Xiao Date: Wed, 17 May 2023 17:07:01 +0800 Subject: [PATCH 583/861] drm/amdgpu: fix S3 issue if MQD in VRAM 1. Need flush HDP for MQD putting in vram 2. Zero out mes MQD Signed-off-by: Jack Xiao Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 3 +++ drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 3 +++ 3 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 8883d5ee13cb..f2d0b1d55d77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -593,6 +593,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); + amdgpu_device_flush_hdp(adev, NULL); + spin_lock(&kiq->ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + @@ -630,6 +632,8 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) if (!kiq->pmf || !kiq->pmf->kiq_map_queues) return -EINVAL; + amdgpu_device_flush_hdp(adev, NULL); + spin_lock(&kiq->ring_lock); /* No need to map kcq on the slave */ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index f1a6abdad21b..88262f10ef7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -632,6 +632,8 @@ static int mes_v10_1_mqd_init(struct amdgpu_ring *ring) uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; uint32_t tmp; + memset(mqd, 0, sizeof(*mqd)); + mqd->header = 0xC0310800; mqd->compute_pipelinestat_enable = 0x00000001; mqd->compute_static_thread_mgmt_se0 = 0xffffffff; @@ -728,6 +730,7 @@ static int mes_v10_1_mqd_init(struct amdgpu_ring *ring) /* offset: 184 - this is used for CP_HQD_GFX_CONTROL */ mqd->cp_hqd_suspend_cntl_stack_offset = tmp; + amdgpu_device_flush_hdp(ring->adev, NULL); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 9791f3581786..9a48328c6572 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -704,6 +704,8 @@ static int mes_v11_0_mqd_init(struct amdgpu_ring *ring) uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; uint32_t tmp; 
+ memset(mqd, 0, sizeof(*mqd)); + mqd->header = 0xC0310800; mqd->compute_pipelinestat_enable = 0x00000001; mqd->compute_static_thread_mgmt_se0 = 0xffffffff; @@ -797,6 +799,7 @@ static int mes_v11_0_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT; mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT; + amdgpu_device_flush_hdp(ring->adev, NULL); return 0; } From 48dd83c0fb6c68742f7fefca907036942dd358be Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Wed, 17 May 2023 13:31:45 -0400 Subject: [PATCH 584/861] drm/amd/display: drop redundant memset() in get_available_dsc_slices() get_available_dsc_slices() returns the number of indices set, and all of the users of get_available_dsc_slices() don't cross the returned bound when iterating over available_slices[]. So, the memset() in get_available_dsc_slices() is redundant and can be dropped. Fixes: 97bda0322b8a ("drm/amd/display: Add DSC support for Navi (v2)") Reported-by: Christophe JAILLET Reviewed-by: Rodrigo Siqueira Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index b9a05bb025db..58dd62cce4bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -645,8 +645,6 @@ static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *av { int idx = 0; - memset(available_slices, -1, MIN_AVAILABLE_SLICES_SIZE); - if (slice_caps.bits.NUM_SLICES_1) available_slices[idx++] = 1; From fd73c8507675f6bccc039cf319f183e41e447cb7 Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Wed, 17 May 2023 13:49:29 -0400 Subject: [PATCH 585/861] drm/amd/display: drop unused function set_abm_event() set_abm_event() is never actually used. So, drop it. 
Fixes: b8fe56375f78 ("drm/amd/display: Refactor ABM feature") Reported-by: kernel test robot Reported-by: Tom Rix Reviewed-by: Rodrigo Siqueira Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 12 ------------ drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 2 -- 2 files changed, 14 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index a66f83a61402..2fb9572ce25d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -131,17 +131,6 @@ static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t op return ret; } -static bool dmub_abm_set_event_ex(struct abm *abm, unsigned int full_screen, unsigned int video_mode, - unsigned int hdr_mode, unsigned int panel_inst) -{ - bool ret = false; - unsigned int feature_support; - - feature_support = abm_feature_support(abm, panel_inst); - - return ret; -} - static bool dmub_abm_set_backlight_level_pwm_ex(struct abm *abm, unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, @@ -167,7 +156,6 @@ static const struct abm_funcs abm_funcs = { .init_abm_config = dmub_abm_init_config_ex, .set_abm_pause = dmub_abm_set_pause_ex, .set_pipe_ex = dmub_abm_set_pipe_ex, - .set_abm_event = dmub_abm_set_event_ex, .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex, }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index db5cf9acafe6..d2190a3320f6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -59,8 +59,6 @@ struct abm_funcs { unsigned int otg_inst, unsigned int option, unsigned int panel_inst); - bool (*set_abm_event)(struct abm *abm, unsigned int full_screen, unsigned int video_mode, - unsigned int hdr_mode, unsigned int panel_inst); }; #endif From 8789989b476b5f3bb0bf1a63b5223f6e76cfd13d Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Wed, 17 May 2023 14:09:39 -0400 Subject: [PATCH 586/861] drm/amd/display: drop unused count variable in create_eml_sink() Since we are only interested in having drm_edid_override_connector_update() update the value of connector->edid_blob_ptr, we don't care about its return value here. So, drop count.
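For illustration, a minimal standalone sketch of the cleanup pattern this change applies (the update_blob() helper and blob variable are hypothetical, not the DRM API): a function called only for its side effect needs no stored return value, and keeping one just creates a dead store for static analyzers to flag.

#include <stdio.h>

static int blob;

/* Hypothetical stand-in for drm_edid_override_connector_update():
 * refreshes a cached blob and returns a count the caller may not need. */
static int update_blob(void)
{
	blob = 42;
	return 1;
}

int main(void)
{
	/* Before: int count = update_blob(); -- 'count' was never read.
	 * After: call the function for its side effect alone. */
	update_blob();

	printf("blob = %d\n", blob);
	return 0;
}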
Fixes: 550e5d23f147 ("drm/amd/display: assign edid_blob_ptr with edid from debugfs") Reported-by: kernel test robot Reviewed-by: Rodrigo Siqueira Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e1c4db673dea..dfdd419310d9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6397,9 +6397,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) /* if connector->edid_override valid, pass * it to edid_override to edid_blob_ptr */ - int count; - count = drm_edid_override_connector_update(&aconnector->base); + drm_edid_override_connector_update(&aconnector->base); if (!aconnector->base.edid_blob_ptr) { DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", From 04e8595819480bec2754bd2cc4377270e7e6c799 Mon Sep 17 00:00:00 2001 From: Tong Liu01 Date: Tue, 16 May 2023 14:50:04 +0800 Subject: [PATCH 587/861] drm/amdgpu: fix incorrect pcie_gen_mask in passthrough case [why] In the passthrough case the device is treated as being on a root bus, and pcie_gen_mask is set to a default value that does not support GEN 3 and GEN 4 PCIe link speeds. So the PCIe link speed is downgraded at SMU hw init under passthrough. [how] Move getting the PCI info to after virtualization detection, and check for the passthrough case when setting pcie_gen_mask. Signed-off-by: Tong Liu01 Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f0666230b2ed..ff9ca0dbeb5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3814,8 +3814,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - amdgpu_device_get_pcie_info(adev); - if (amdgpu_mcbp) DRM_INFO("MCBP is enabled\n"); @@ -3831,6 +3829,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* detect hw virtualization here */ amdgpu_detect_virtualization(adev); + amdgpu_device_get_pcie_info(adev); + r = amdgpu_device_get_job_timeout_settings(adev); if (r) { dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); @@ -5588,7 +5588,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; /* covers APUs as well */ - if (pci_is_root_bus(adev->pdev->bus)) { + if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) { if (adev->pm.pcie_gen_mask == 0) adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; if (adev->pm.pcie_mlw_mask == 0) From ea2a50ae3d4c9857c8967df44b0b2b42eee7d7c9 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 18 May 2023 09:30:26 -0500 Subject: [PATCH 588/861] drm/amd: Update driver-misc.html for Phoenix AMD has added marketing information publicly for Phoenix, so update the APU table with matching versions.
Link: https://www.amd.com/en/product/13036 Signed-off-by: Mario Limonciello Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- Documentation/gpu/amdgpu/apu-asic-info-table.csv | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv index 395a7b7bfaef..9ef623bd684e 100644 --- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv +++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv @@ -8,3 +8,4 @@ Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barce Ryzen 6000 series / Ryzen 7x35 series, YELLOW CARP / Rembrandt / Rembrandt+, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3 Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5 Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8 +Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11 \ No newline at end of file From c0dba2d34ac153414c4c09e073f5582eeb67a252 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 18 May 2023 09:35:17 -0500 Subject: [PATCH 589/861] drm/amd: Update driver-misc.html for Dragon Range AMD has added marketing information publicly for Dragon Range, so update the APU table with matching versions. Link: https://www.amd.com/en/product/13016 Signed-off-by: Mario Limonciello Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- Documentation/gpu/amdgpu/apu-asic-info-table.csv | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv index 9ef623bd684e..b5c67059e9be 100644 --- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv +++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv @@ -7,5 +7,6 @@ SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0 Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1 Ryzen 6000 series / Ryzen 7x35 series, YELLOW CARP / Rembrandt / Rembrandt+, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3 Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5 +Ryzen 7x45 series (FL1), / Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5 Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8 Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11 \ No newline at end of file From 109b4d8cfe4279da1cbcbcd99ae54cb2b2aee521 Mon Sep 17 00:00:00 2001 From: Su Hui Date: Mon, 15 May 2023 09:34:28 +0800 Subject: [PATCH 590/861] drm/amdgpu: remove unnecessary (void*) conversions No need to cast (void*) to (struct amdgpu_device *).
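For context, a minimal standalone sketch of why such casts are redundant (my_device and my_info_show are hypothetical names, not the amdgpu code): in C, unlike C++, a void * converts implicitly to any object pointer type, so the explicit cast adds noise without changing behavior.

#include <stdio.h>

struct my_device {
	int id;
};

/* Mirrors the debugfs show() pattern: private data arrives as void *. */
static int my_info_show(void *private)
{
	struct my_device *dev = private;	/* no cast needed in C */

	printf("device id: %d\n", dev->id);
	return 0;
}

int main(void)
{
	struct my_device dev = { .id = 7 };

	return my_info_show(&dev);
}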
Signed-off-by: Su Hui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index e94d0cf3f793..c657bed350ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1638,7 +1638,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct amdgpu_device *adev = m->private; struct drm_device *dev = adev_to_drm(adev); int r = 0, i; @@ -1749,7 +1749,7 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val) static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct amdgpu_device *adev = m->private; struct drm_device *dev = adev_to_drm(adev); struct drm_file *file; int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 928814492d5b..876ec35b8f83 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -833,7 +833,7 @@ static const struct dma_fence_ops amdgpu_job_fence_ops = { #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct amdgpu_device *adev = m->private; int i; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index fec9df354652..74055cba3dc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -951,7 +951,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct amdgpu_device *adev = m->private; struct drm_device *dev = adev_to_drm(adev); struct drm_file *file; int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index b27ac48ca123..ebeddc9a37e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -463,7 +463,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct amdgpu_device *adev = m->private; seq_printf(m, "--------------------- DELAYED --------------------- \n"); amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED], diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 815098be4c2f..41d047e5de69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1468,7 +1468,7 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc) static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct 
amdgpu_device *adev = m->private; struct drm_amdgpu_info_firmware fw_info; struct drm_amdgpu_query_fw query_fw; struct atom_context *ctx = adev->mode_info.atom_context; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b8adcebb0d89..509b26e9d0c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2257,7 +2257,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct amdgpu_device *adev = m->private; return ttm_pool_debugfs(&adev->mman.bdev.pool, m); } From e5edd9ab0e291912cf68abf5dc84d393d29a60a7 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 18 May 2023 09:40:09 -0500 Subject: [PATCH 591/861] drm/amd: Update driver-misc.html for Rembrandt-R AMD has added marketing information publicly for Rembrandt-R, so update the APU table with matching versions. Link: https://www.amd.com/en/product/13086 Signed-off-by: Mario Limonciello Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- Documentation/gpu/amdgpu/apu-asic-info-table.csv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv index b5c67059e9be..2e76b427ba1e 100644 --- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv +++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv @@ -5,7 +5,7 @@ Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2, 11.0.3 Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1, 10.0.1 SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0 Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1 -Ryzen 6000 series / Ryzen 7x35 series, YELLOW CARP / Rembrandt / Rembrandt+, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3 +Ryzen 6000 series / Ryzen 7x35 series / Ryzen 7x36 series, YELLOW CARP / Rembrandt / Rembrandt-R, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3 Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5 Ryzen 7x45 series (FL1), / Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5 Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8 From a2b308044dcaca8d3e580959a4f867a1d5c37fac Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Sat, 13 May 2023 14:51:00 +0200 Subject: [PATCH 592/861] drm/amdgpu: Validate VM ioctl flags. None have been defined yet, so reject anybody setting any. Mesa sets it to 0 anyway. 
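A minimal standalone sketch of the reserve-the-flags pattern this patch applies (my_vm_ioctl, my_vm_args, and MY_VM_FLAGS_VALID are hypothetical names, not the amdgpu uAPI): rejecting unknown flag bits today keeps them available for new features later without breaking old userspace.

#include <errno.h>
#include <stdio.h>

#define MY_VM_FLAGS_VALID 0x0u	/* no flags defined yet */

struct my_vm_args {
	unsigned int flags;
	unsigned int op;
};

static int my_vm_ioctl(const struct my_vm_args *args)
{
	/* Any bit outside the defined mask is an error now, so it can
	 * safely be given a meaning in a future kernel. */
	if (args->flags & ~MY_VM_FLAGS_VALID)
		return -EINVAL;

	return 0;
}

int main(void)
{
	struct my_vm_args ok = { .flags = 0 }, bad = { .flags = 0x2 };

	printf("ok: %d, bad: %d\n", my_vm_ioctl(&ok), my_vm_ioctl(&bad));
	return 0;
}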
Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 22a900f298f2..22f9a65ca0fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2370,6 +2370,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct amdgpu_fpriv *fpriv = filp->driver_priv; int r; + /* No valid flags defined yet */ + if (args->in.flags) + return -EINVAL; + switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ From b18f05a0666aecd5cb19c26a8305bcfa4e9d6502 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Wed, 17 May 2023 14:39:46 -0400 Subject: [PATCH 593/861] drm/amd/display: Fix artifacting on eDP panels when engaging freesync video mode [Why] When freesync video mode is enabled, switching resolution from native mode to one of the freesync video compatible modes can trigger continuous artifacts on some eDP panels when running under KDE. The artifacting can be seen in the attached bug report. [How] Fix this by restricting updates that require a full commit, using the same checks for stream and scaling changes in the enable pass of dm_update_crtc_state() along with the check for compatible timings for freesync video mode. Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2162 Fixes: da5e14909776 ("drm/amd/display: Fix hang when skipping modeset") Signed-off-by: Aurabindo Pillai Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dfdd419310d9..abef3c2f59c8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9285,6 +9285,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, /* Now check if we should set freesync video mode */ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && + dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && + dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; From 1385d88c6aa774332f1a88562b6f1bf04de6d710 Mon Sep 17 00:00:00 2001 From: Sukrut Bellary Date: Wed, 3 May 2023 16:15:07 -0700 Subject: [PATCH 594/861] drm:amd:amdgpu: Fix missing buffer object unlock in failure path smatch warning - 1) drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:3615 gfx_v9_0_kiq_resume() warn: inconsistent returns 'ring->mqd_obj->tbo.base.resv'. 2) drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:6901 gfx_v10_0_kiq_resume() warn: inconsistent returns 'ring->mqd_obj->tbo.base.resv'.
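A minimal standalone sketch of the lock-balance rule the fix enforces (obj, reserve(), kmap(), and init_queue() are hypothetical stand-ins, not the amdgpu buffer-object API): once the reservation succeeds, every return path, including error paths, must release it, which is exactly what smatch's "inconsistent returns" warning catches.

#include <stdio.h>

struct obj {
	int reserved;
};

static int reserve(struct obj *o)    { o->reserved = 1; return 0; }
static void unreserve(struct obj *o) { o->reserved = 0; }
static int kmap(struct obj *o)       { (void)o; return -1; /* simulate failure */ }

static int init_queue(struct obj *o)
{
	int r = reserve(o);

	if (r)
		return r;		/* nothing held yet */

	r = kmap(o);
	if (r) {
		unreserve(o);		/* the missing unlock the patch adds */
		return r;
	}

	/* ... program the queue ... */
	unreserve(o);
	return 0;
}

int main(void)
{
	struct obj o = { 0 };
	int r = init_queue(&o);

	printf("r = %d, still reserved? %d\n", r, o.reserved);
	return 0;
}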
Signed-off-by: Sukrut Bellary Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 4 +++- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 89158c72753e..0d15002eac69 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6803,8 +6803,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(ring->mqd_obj); return r; + } gfx_v10_0_kiq_init_queue(ring); amdgpu_bo_kunmap(ring->mqd_obj); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index de8e70b3db75..8bf95a6b0767 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3614,8 +3614,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(ring->mqd_obj); return r; + } gfx_v9_0_kiq_init_queue(ring); amdgpu_bo_kunmap(ring->mqd_obj); From 6091ede913015fd3c07cb16298505bbd71f41689 Mon Sep 17 00:00:00 2001 From: Su Hui Date: Wed, 17 May 2023 10:52:19 +0800 Subject: [PATCH 595/861] drm/radeon: Remove unnecessary (void*) conversions No need to cast (void*) to (struct radeon_device *) or (struct radeon_ring *). Signed-off-by: Su Hui Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/r100.c | 8 ++++---- drivers/gpu/drm/radeon/r300.c | 2 +- drivers/gpu/drm/radeon/r420.c | 2 +- drivers/gpu/drm/radeon/r600.c | 2 +- drivers/gpu/drm/radeon/radeon_fence.c | 2 +- drivers/gpu/drm/radeon/radeon_gem.c | 2 +- drivers/gpu/drm/radeon/radeon_ib.c | 2 +- drivers/gpu/drm/radeon/radeon_pm.c | 2 +- drivers/gpu/drm/radeon/radeon_ring.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- drivers/gpu/drm/radeon/rs400.c | 2 +- drivers/gpu/drm/radeon/rv515.c | 4 ++-- 12 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index d4f09ecc3d22..affa9e0309b2 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2929,7 +2929,7 @@ static void r100_set_safe_registers(struct radeon_device *rdev) #if defined(CONFIG_DEBUG_FS) static int r100_debugfs_rbbm_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; uint32_t reg, value; unsigned i; @@ -2948,7 +2948,7 @@ static int r100_debugfs_rbbm_info_show(struct seq_file *m, void *unused) static int r100_debugfs_cp_ring_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; uint32_t rdp, wdp; unsigned count, i, j; @@ -2974,7 +2974,7 @@ static int r100_debugfs_cp_ring_info_show(struct seq_file *m, void *unused) static int r100_debugfs_cp_csq_fifo_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; uint32_t csq_stat, csq2_stat, tmp; unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; unsigned i; @@ -3022,7 +3022,7 @@ static int r100_debugfs_cp_csq_fifo_show(struct seq_file *m, void 
*unused) static int r100_debugfs_mc_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; uint32_t tmp; tmp = RREG32(RADEON_CONFIG_MEMSIZE); diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 7b0cfeaddcec..9c1a92fa2af6 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -589,7 +589,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev) #if defined(CONFIG_DEBUG_FS) static int rv370_debugfs_pcie_gart_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; uint32_t tmp; tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 7e6320e8c6a0..eae8a6389f5e 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -474,7 +474,7 @@ int r420_init(struct radeon_device *rdev) #if defined(CONFIG_DEBUG_FS) static int r420_debugfs_pipes_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; uint32_t tmp; tmp = RREG32(R400_GB_PIPE_SELECT); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index dd78fc499402..382795a8b3c0 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -4345,7 +4345,7 @@ restart_ih: static int r600_debugfs_mc_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS); DREG32_SYS(m, rdev, VM_L2_STATUS); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 73e3117420bf..2749dde5838f 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -955,7 +955,7 @@ void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring) #if defined(CONFIG_DEBUG_FS) static int radeon_debugfs_fence_info_show(struct seq_file *m, void *data) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; int i, j; for (i = 0; i < RADEON_NUM_RINGS; ++i) { diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index bdc5af23f005..5de99ffa072f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -879,7 +879,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, #if defined(CONFIG_DEBUG_FS) static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; struct radeon_bo *rbo; unsigned i = 0; diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c index 6a45a72488f9..fb9ecf5dbe2b 100644 --- a/drivers/gpu/drm/radeon/radeon_ib.c +++ b/drivers/gpu/drm/radeon/radeon_ib.c @@ -292,7 +292,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev) static int radeon_debugfs_sa_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index cbc554928bcc..b73fd9ab0252 100644 --- 
a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1916,7 +1916,7 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) static int radeon_debugfs_pm_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; struct drm_device *ddev = rdev->ddev; if ((rdev->flags & RADEON_IS_PX) && diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 7e207276df37..e6534fa9f1fb 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -464,7 +464,7 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring) static int radeon_debugfs_ring_info_show(struct seq_file *m, void *unused) { - struct radeon_ring *ring = (struct radeon_ring *) m->private; + struct radeon_ring *ring = m->private; struct radeon_device *rdev = ring->rdev; uint32_t rptr, wptr, rptr_next; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 2220cdf6a3f6..06a53ecc04a2 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -780,7 +780,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) static int radeon_ttm_page_pool_show(struct seq_file *m, void *data) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; return ttm_pool_debugfs(&rdev->mman.bdev.pool, m); } diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 6383f7a34bd8..922a29e58880 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -307,7 +307,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) #if defined(CONFIG_DEBUG_FS) static int rs400_debugfs_gart_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; uint32_t tmp; tmp = RREG32(RADEON_HOST_PATH_CNTL); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 63fb06e8e2d7..76260fdfbaa7 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -221,7 +221,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) #if defined(CONFIG_DEBUG_FS) static int rv515_debugfs_pipes_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; uint32_t tmp; tmp = RREG32(GB_PIPE_SELECT); @@ -237,7 +237,7 @@ static int rv515_debugfs_pipes_info_show(struct seq_file *m, void *unused) static int rv515_debugfs_ga_info_show(struct seq_file *m, void *unused) { - struct radeon_device *rdev = (struct radeon_device *)m->private; + struct radeon_device *rdev = m->private; uint32_t tmp; tmp = RREG32(0x2140); From b3122c92697a870d75a516537f094f589dfe7ca1 Mon Sep 17 00:00:00 2001 From: Jesse Zhang Date: Thu, 18 May 2023 09:46:22 +0800 Subject: [PATCH 596/861] drm/amdgpu: don't enable secure display on incompatible platforms [why] [drm] psp gfx command LOAD_TA(0x1) failed and response status is (0x7) [drm] psp gfx command INVOKE_CMD(0x3) failed and response status is (0x4) amdgpu 0000:04:00.0: amdgpu: Secure display: Generic Failure. 
[how] don't enable secure display on incompatible platforms Suggested-by: Aaron Liu Signed-off-by: Jesse zhang Reviewed-by: Aaron Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index e1b7fca09666..5f10883da6a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -57,7 +57,13 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) if (err) return err; - return psp_init_ta_microcode(psp, ucode_prefix); + err = psp_init_ta_microcode(psp, ucode_prefix); + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) && + (adev->pdev->revision == 0xa1) && + (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) { + adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0; + } + return err; } static int psp_v10_0_ring_create(struct psp_context *psp, From c796d7e039b57f9407523b8c4a3ba5358dd2d8ff Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Wed, 17 May 2023 13:49:08 +0800 Subject: [PATCH 597/861] drm/amdgpu: add the smu_v13_0_6 and gfx_v9_4_3 ip block Signed-off-by: Shiwu Zhang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index caae6bf2fd30..a0685a5ebda6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -30,6 +30,7 @@ #include "soc15.h" #include "gfx_v9_0.h" +#include "gfx_v9_4_3.h" #include "gmc_v9_0.h" #include "df_v1_7.h" #include "df_v3_6.h" @@ -1795,6 +1796,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 3): case IP_VERSION(13, 0, 4): case IP_VERSION(13, 0, 5): + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 10): @@ -1893,6 +1895,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(9, 4, 2): amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); break; + case IP_VERSION(9, 4, 3): + amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block); + break; case IP_VERSION(10, 1, 10): case IP_VERSION(10, 1, 2): case IP_VERSION(10, 1, 1): From e825fb641b4c78a8dab5101559dd27e64d2f24aa Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Wed, 17 May 2023 13:21:15 +0800 Subject: [PATCH 598/861] drm/amdgpu: fix the memory override in kiq ring struct This was introduced by the code merge and would let the adev->gfx.kiq[0].ring struct be overridden Signed-off-by: Shiwu Zhang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f2d0b1d55d77..2b4bf6c11ae4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -448,8 +448,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings]) + adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL); + if 
(!adev->gfx.mec.mqd_backup[j]) dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); } } From c77b3608b8ec79a33ac36a9d281e0395a3343d6e Mon Sep 17 00:00:00 2001 From: YuanShang Date: Mon, 8 May 2023 12:12:28 +0800 Subject: [PATCH 599/861] drm/amdgpu: Remove IMU ucode in vf2pf The IMU firmware is loaded on the host side, not the guest. Remove IMU in vf2pf ucode id enum. Signed-off-by: YuanShang Reviewed-By: Horace Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index d0ad7cb0fa05..2c1fbed24535 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -560,7 +560,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev) POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version); - POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU, adev->gfx.imu_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_context.bin_desc.fw_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 24d42d24e6a0..104a5ad8397d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -70,7 +70,6 @@ enum amd_sriov_ucode_engine_id { AMD_SRIOV_UCODE_ID_RLC_SRLS, AMD_SRIOV_UCODE_ID_MEC, AMD_SRIOV_UCODE_ID_MEC2, - AMD_SRIOV_UCODE_ID_IMU, AMD_SRIOV_UCODE_ID_SOS, AMD_SRIOV_UCODE_ID_ASD, AMD_SRIOV_UCODE_ID_TA_RAS, From 2ecf927b1730a4addba7ef775f433046fbcc423b Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 21:45:51 -0400 Subject: [PATCH 600/861] drm/amdgpu: separate ras irq from vcn instance irq for UVD_POISON Separate vcn RAS poison consumption handling from the instance irq, and register dedicated ras_poison_irq src and funcs for UVD_POISON. 
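A minimal standalone sketch of the split described above (irq_src, vcn_inst, and the process callbacks are hypothetical, simplified types, not the amdgpu interrupt API): the instance irq and the RAS poison irq become two sources, each with its own function table, so poison handling can be enabled and torn down independently of the ring/fence interrupts.

#include <stdio.h>

struct irq_src {
	int (*process)(void);
};

static int process_fence(void)  { puts("fence/instance irq"); return 0; }
static int process_poison(void) { puts("RAS poison irq");     return 0; }

/* One funcs table per source, analogous to vcn_v2_5_irq_funcs vs.
 * vcn_v2_6_ras_irq_funcs in the patches below. */
struct vcn_inst {
	struct irq_src irq;		/* ring/fence interrupts */
	struct irq_src ras_poison_irq;	/* RAS poison, managed separately */
};

int main(void)
{
	struct vcn_inst inst = {
		.irq		= { .process = process_fence },
		.ras_poison_irq	= { .process = process_poison },
	};

	inst.irq.process();
	inst.ras_poison_irq.process();
	return 0;
}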
v2: - Separate ras irq from vcn instance irq - Improve the subject and code comments v3: - Split the patch into three parts - Improve the code comments Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 27 ++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 3 +++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index c088111c2321..acbef1a24b9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1188,6 +1188,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, return 0; } +int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r, i; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0); + if (r) + goto late_fini; + } + } + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + return r; +} + int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) { int err; @@ -1209,7 +1234,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) adev->vcn.ras_if = &ras->ras_block.ras_comm; if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 1eb9ccd1d83d..92d5534df5f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -239,6 +239,7 @@ struct amdgpu_vcn_inst { struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; atomic_t sched_score; struct amdgpu_irq_src irq; + struct amdgpu_irq_src ras_poison_irq; struct amdgpu_vcn_reg external; struct amdgpu_bo *dpg_sram_bo; struct dpg_pause_state pause_state; @@ -409,6 +410,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block); int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); #endif From 46d75d23005f87057881c460a94f9357d079087f Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 21:52:00 -0400 Subject: [PATCH 601/861] drm/amdgpu: add RAS POISON interrupt funcs for vcn_v2_6 Add ras_poison_irq and functions. 
Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 7044bd7c9f62..bb1875f926f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle) /* VCN POISON TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], - VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq); + VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq); if (r) return r; } @@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle) (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, i, mmUVD_STATUS))) vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); } return 0; @@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); break; - case VCN_2_6__SRCID_UVD_POISON: - amdgpu_vcn_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = { .process = vcn_v2_5_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = { + .set = vcn_v2_6_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) continue; adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; + + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs; } } @@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = { static struct amdgpu_vcn_ras vcn_v2_6_ras = { .ras_block = { .hw_ops = &vcn_v2_6_ras_hw_ops, + .ras_late_init = amdgpu_vcn_ras_late_init, }, }; From 66a11ecbdebbc8ab29a4076df0b95f0bfd61b1c5 Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 22:09:43 -0400 Subject: [PATCH 602/861] drm/amdgpu: add RAS POISON interrupt funcs for vcn_v4_0 Add ras_poison_irq and functions. And fix the amdgpu_irq_put call trace in vcn_v4_0_hw_fini. 
[ 44.563572] RIP: 0010:amdgpu_irq_put+0xa4/0xc0 [amdgpu] [ 44.563629] RSP: 0018:ffffb36740edfc90 EFLAGS: 00010246 [ 44.563630] RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000000000 [ 44.563630] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 [ 44.563631] RBP: ffffb36740edfcb0 R08: 0000000000000000 R09: 0000000000000000 [ 44.563631] R10: 0000000000000000 R11: 0000000000000000 R12: ffff954c568e2ea8 [ 44.563631] R13: 0000000000000000 R14: ffff954c568c0000 R15: ffff954c568e2ea8 [ 44.563632] FS: 0000000000000000(0000) GS:ffff954f584c0000(0000) knlGS:0000000000000000 [ 44.563632] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 44.563633] CR2: 00007f028741ba70 CR3: 000000026ca10000 CR4: 0000000000750ee0 [ 44.563633] PKRU: 55555554 [ 44.563633] Call Trace: [ 44.563634] [ 44.563634] vcn_v4_0_hw_fini+0x62/0x160 [amdgpu] [ 44.563700] vcn_v4_0_suspend+0x13/0x30 [amdgpu] [ 44.563755] amdgpu_device_ip_suspend_phase2+0x240/0x470 [amdgpu] [ 44.563806] amdgpu_device_ip_suspend+0x41/0x80 [amdgpu] [ 44.563858] amdgpu_device_pre_asic_reset+0xd9/0x4a0 [amdgpu] [ 44.563909] amdgpu_device_gpu_recover.cold+0x548/0xcf1 [amdgpu] [ 44.564006] amdgpu_debugfs_reset_work+0x4c/0x80 [amdgpu] [ 44.564061] process_one_work+0x21f/0x400 [ 44.564062] worker_thread+0x200/0x3f0 [ 44.564063] ? process_one_work+0x400/0x400 [ 44.564064] kthread+0xee/0x120 [ 44.564065] ? kthread_complete_and_exit+0x20/0x20 [ 44.564066] ret_from_fork+0x22/0x30 Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 36 ++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 81446e6996df..60c3fd20e8ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -139,7 +139,7 @@ static int vcn_v4_0_sw_init(void *handle) /* VCN POISON TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], - VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq); + VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq); if (r) return r; @@ -305,8 +305,8 @@ static int vcn_v4_0_hw_fini(void *handle) vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); } } - - amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); } return 0; @@ -1975,6 +1975,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp return 0; } +/** + * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state + * + * @adev: amdgpu_device pointer + * @source: interrupt sources + * @type: interrupt types + * @state: interrupt states + * + * Set VCN block RAS interrupt state + */ +static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + /** * vcn_v4_0_process_interrupt - process VCN block interrupt * @@ -2007,9 +2025,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_ case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); break; - case VCN_4_0__SRCID_UVD_POISON: - amdgpu_vcn_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2024,6 +2039,11 @@ static const struct 
amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = { .process = vcn_v4_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = { + .set = vcn_v4_0_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + /** * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions * * @adev: amdgpu_device pointer * * Set VCN block interrupt irq functions */ @@ -2041,6 +2061,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs; + + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs; } } @@ -2114,6 +2137,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = { static struct amdgpu_vcn_ras vcn_v4_0_ras = { .ras_block = { .hw_ops = &vcn_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_vcn_ras_late_init, }, }; From 18dad20c3dcbd7789f3d07056cd78394c8278a75 Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 22:57:19 -0400 Subject: [PATCH 603/861] drm/amdgpu: separate ras irq from jpeg instance irq for UVD_POISON Separate jpeg RAS poison consumption handling from the instance irq, and register dedicated ras_poison_irq src and funcs for UVD_POISON. v2: - Separate ras irq from jpeg instance irq - Improve the subject and code comments v3: - Split the patch into three parts - Improve the code comments Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 27 +++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 3 +++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 8c479669c459..3add4b4f0667 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -245,6 +245,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, return 0; } +int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r, i; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); + if (r) + goto late_fini; + } + } + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + return r; +} + int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) { int err; @@ -266,7 +291,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) adev->jpeg.ras_if = &ras->ras_block.ras_comm; if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 90516f623f56..ffe47e9f5bf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -39,6 +39,7 @@ struct amdgpu_jpeg_reg{ struct amdgpu_jpeg_inst { struct amdgpu_ring ring_dec[AMDGPU_MAX_JPEG_RINGS]; struct amdgpu_irq_src irq; + struct amdgpu_irq_src ras_poison_irq; struct amdgpu_jpeg_reg external; uint8_t aid_id; }; @@ -78,6 +79,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, struct
amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block); int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); #endif /*__AMDGPU_JPEG_H__*/ From 674f90f83bc941d0cd5a85a714c5bfeb8789163a Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 22:59:54 -0400 Subject: [PATCH 604/861] drm/amdgpu: add RAS POISON interrupt funcs for jpeg_v2_6 Add ras_poison_irq and functions. Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 28 ++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index b79edb12b90e..f533ede484d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle) /* JPEG DJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], - VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq); + VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); if (r) return r; /* JPEG EJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], - VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq); + VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); if (r) return r; } @@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle) if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); } return 0; @@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__JPEG_DECODE: amdgpu_fence_process(adev->jpeg.inst[ip_instance].ring_dec); break; - case VCN_2_6__SRCID_DJPEG0_POISON: - case VCN_2_6__SRCID_EJPEG0_POISON: - amdgpu_jpeg_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -725,6 +732,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = { .process = jpeg_v2_5_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = { + .set = jpeg_v2_6_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) adev->jpeg.inst[i].irq.num_types = 1; adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; + + adev->jpeg.inst[i].ras_poison_irq.num_types = 1; + adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs; } } @@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = { static struct amdgpu_jpeg_ras jpeg_v2_6_ras = { .ras_block = { .hw_ops = &jpeg_v2_6_ras_hw_ops, + .ras_late_init = amdgpu_jpeg_ras_late_init, }, }; From 
a34b09060a3b95c0341b444ea49558a807988e34 Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 23:02:10 -0400 Subject: [PATCH 605/861] drm/amdgpu: add RAS POISON interrupt funcs for jpeg_v4_0 Add ras_poison_irq and functions. And fix the amdgpu_irq_put call trace in jpeg_v4_0_hw_fini. [ 50.497562] RIP: 0010:amdgpu_irq_put+0xa4/0xc0 [amdgpu] [ 50.497619] RSP: 0018:ffffaa2400fcfcb0 EFLAGS: 00010246 [ 50.497620] RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000000000 [ 50.497621] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 [ 50.497621] RBP: ffffaa2400fcfcd0 R08: 0000000000000000 R09: 0000000000000000 [ 50.497622] R10: 0000000000000000 R11: 0000000000000000 R12: ffff99b2105242d8 [ 50.497622] R13: 0000000000000000 R14: ffff99b210500000 R15: ffff99b210500000 [ 50.497623] FS: 0000000000000000(0000) GS:ffff99b518480000(0000) knlGS:0000000000000000 [ 50.497623] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 50.497624] CR2: 00007f9d32aa91e8 CR3: 00000001ba210000 CR4: 0000000000750ee0 [ 50.497624] PKRU: 55555554 [ 50.497625] Call Trace: [ 50.497625] [ 50.497627] jpeg_v4_0_hw_fini+0x43/0xc0 [amdgpu] [ 50.497693] jpeg_v4_0_suspend+0x13/0x30 [amdgpu] [ 50.497751] amdgpu_device_ip_suspend_phase2+0x240/0x470 [amdgpu] [ 50.497802] amdgpu_device_ip_suspend+0x41/0x80 [amdgpu] [ 50.497854] amdgpu_device_pre_asic_reset+0xd9/0x4a0 [amdgpu] [ 50.497905] amdgpu_device_gpu_recover.cold+0x548/0xcf1 [amdgpu] [ 50.498005] amdgpu_debugfs_reset_work+0x4c/0x80 [amdgpu] [ 50.498060] process_one_work+0x21f/0x400 [ 50.498063] worker_thread+0x200/0x3f0 [ 50.498064] ? process_one_work+0x400/0x400 [ 50.498065] kthread+0xee/0x120 [ 50.498067] ? kthread_complete_and_exit+0x20/0x20 [ 50.498068] ret_from_fork+0x22/0x30 Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 28 +++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 495facb885f4..86383fbf9358 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle) /* JPEG DJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq); + VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); if (r) return r; /* JPEG EJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq); + VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); if (r) return r; @@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); return 0; } @@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct 
amdgpu_device *adev, case VCN_4_0__SRCID__JPEG_DECODE: amdgpu_fence_process(adev->jpeg.inst->ring_dec); break; - case VCN_4_0__SRCID_DJPEG0_POISON: - case VCN_4_0__SRCID_EJPEG0_POISON: - amdgpu_jpeg_process_poison_irq(adev, source, entry); - break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { .process = jpeg_v4_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = { + .set = jpeg_v4_0_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev) { adev->jpeg.inst->irq.num_types = 1; adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs; + + adev->jpeg.inst->ras_poison_irq.num_types = 1; + adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs; } const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = { @@ -811,6 +824,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = { static struct amdgpu_jpeg_ras jpeg_v4_0_ras = { .ras_block = { .hw_ops = &jpeg_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_jpeg_ras_late_init, }, }; From d7b8e68dc04ad89809832bebe9ab5d7965a6eef5 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 19 May 2023 11:36:57 +0530 Subject: [PATCH 606/861] drm/amdgpu: Fix uninitialized variable in gfx_v9_4_3_cp_resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:1925:6: error: variable 'r' is used uninitialized whenever 'if' condition is false [-Werror,-Wsometimes-uninitialized] if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:1931:6: note: uninitialized use occurs here if (r) ^ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:1925:2: note: remove the 'if' if its condition is always true if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:1923:7: note: initialize the variable 'r' to silence this warning int r, i, num_xcc; ^ = 0 1 error generated. Suggested-by: Lijo Lazar Cc: Luben Tuikov Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index e5cfb3adb3b3..6a1a28df606c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1920,7 +1920,7 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { - int r, i, num_xcc; + int r = 0, i, num_xcc; if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_NONE) == From b336c681bdb5d1814acf8f19d1225a93f36ddfa2 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Fri, 19 May 2023 16:01:57 +0800 Subject: [PATCH 607/861] drm/amdgpu: fix vga_set_state NULL pointer issue Fix NULL pointer issue for vga_set_state function as not all the ASIC need this operation. 
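The crash is a call through a NULL function pointer when an ASIC leaves the hook unimplemented, so the guard belongs in the dispatch macro itself. A reduced sketch of the optional-hook pattern the hunk below applies (hypothetical macro name, not the driver's):

  /* Optional-callback dispatch: call through only when the ASIC
   * provides the hook, otherwise return a harmless default.
   */
  #define asic_call_optional(adev, hook, ...) \
          ((adev)->asic_funcs->hook ? \
           (adev)->asic_funcs->hook((adev), ##__VA_ARGS__) : 0)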
Signed-off-by: Likun Gao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a4d0bc80ac92..8b4a89a6c3fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1241,7 +1241,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); /* * ASICs macro. */ -#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) +#define amdgpu_asic_set_vga_state(adev, state) \ + ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0) #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) #define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev)) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) From 9788d087caffd8358d6e14349ee69d9385666719 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Fri, 5 May 2023 13:16:32 -0400 Subject: [PATCH 608/861] drm/amd/display: improve the message printed when loading DC [Why&How] Change how the DC version and hardware version are printed when the driver is loaded. - Remove exclamation - Add DC version and hardware version to both success and failure cases - Add version in between appropriate filler words to make a complete statement. Reviewed-by: Harry Wentland Acked-by: Tom Chung Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index abef3c2f59c8..4edbcc9e535b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1667,10 +1667,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.dc = dc_create(&init_data); if (adev->dm.dc) { - DRM_INFO("Display Core initialized with v%s! %s\n", DC_VER, + DRM_INFO("Display Core v%s initialized on %s\n", DC_VER, dce_version_to_string(adev->dm.dc->ctx->dce_version)); } else { - DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); + DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER, + dce_version_to_string(adev->dm.dc->ctx->dce_version)); goto error; } From 20a29ac09192f9d9705015261652f277e8162f09 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Fri, 19 May 2023 18:54:18 +0800 Subject: [PATCH 609/861] drm/amdgpu: retire set_vga_state for some ASIC The set_vga_state operation is only allowed on SI generation ASICs, so retire the related functions on those ASICs where they did not do anything.
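With amdgpu_asic_set_vga_state() now tolerating a missing callback, a funcs table can simply omit the hook instead of carrying an empty stub, which is what the removals below amount to. An illustrative sketch (example names, not a real table):

  static const struct amdgpu_asic_funcs example_asic_funcs = {
          .reset = &example_asic_reset,
          .get_xclk = &example_get_xclk,
          /* .set_vga_state intentionally absent: the guarded macro
           * from the previous patch returns 0 when the hook is NULL.
           */
  };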
Signed-off-by: Likun Gao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 6 ------ drivers/gpu/drm/amd/amdgpu/soc15.c | 8 -------- drivers/gpu/drm/amd/amdgpu/soc21.c | 6 ------ drivers/gpu/drm/amd/amdgpu/vi.c | 6 ------ 4 files changed, 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index a5f76c9538c4..51523b27a186 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -341,11 +341,6 @@ void nv_grbm_select(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl); } -static void nv_vga_set_state(struct amdgpu_device *adev, bool state) -{ - /* todo */ -} - static bool nv_read_disabled_bios(struct amdgpu_device *adev) { /* todo */ @@ -654,7 +649,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .read_register = &nv_read_register, .reset = &nv_asic_reset, .reset_method = &nv_asic_reset_method, - .set_vga_state = &nv_vga_set_state, .get_xclk = &nv_get_xclk, .set_uvd_clocks = &nv_set_uvd_clocks, .set_vce_clocks = &nv_set_vce_clocks, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 122ba1a505c3..135440b5afe9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -346,11 +346,6 @@ void soc15_grbm_select(struct amdgpu_device *adev, WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl); } -static void soc15_vga_set_state(struct amdgpu_device *adev, bool state) -{ - /* todo */ -} - static bool soc15_read_disabled_bios(struct amdgpu_device *adev) { /* todo */ @@ -849,7 +844,6 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .read_register = &soc15_read_register, .reset = &soc15_asic_reset, .reset_method = &soc15_asic_reset_method, - .set_vga_state = &soc15_vga_set_state, .get_xclk = &soc15_get_xclk, .set_uvd_clocks = &soc15_set_uvd_clocks, .set_vce_clocks = &soc15_set_vce_clocks, @@ -871,7 +865,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .read_register = &soc15_read_register, .reset = &soc15_asic_reset, .reset_method = &soc15_asic_reset_method, - .set_vga_state = &soc15_vga_set_state, .get_xclk = &soc15_get_xclk, .set_uvd_clocks = &soc15_set_uvd_clocks, .set_vce_clocks = &soc15_set_vce_clocks, @@ -893,7 +886,6 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .read_register = &soc15_read_register, .reset = &soc15_asic_reset, .reset_method = &soc15_asic_reset_method, - .set_vga_state = &soc15_vga_set_state, .get_xclk = &soc15_get_xclk, .set_uvd_clocks = &soc15_set_uvd_clocks, .set_vce_clocks = &soc15_set_vce_clocks, diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 6bff936a6e55..e5e5d68a4d70 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -248,11 +248,6 @@ void soc21_grbm_select(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl); } -static void soc21_vga_set_state(struct amdgpu_device *adev, bool state) -{ - /* todo */ -} - static bool soc21_read_disabled_bios(struct amdgpu_device *adev) { /* todo */ @@ -559,7 +554,6 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs = .read_register = &soc21_read_register, .reset = &soc21_asic_reset, .reset_method = &soc21_asic_reset_method, - .set_vga_state = &soc21_vga_set_state, .get_xclk = &soc21_get_xclk, .set_uvd_clocks = &soc21_set_uvd_clocks, .set_vce_clocks = &soc21_set_vce_clocks, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c 
b/drivers/gpu/drm/amd/amdgpu/vi.c index 8e70581960fb..770f2d7a371f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -580,11 +580,6 @@ void vi_srbm_select(struct amdgpu_device *adev, WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl); } -static void vi_vga_set_state(struct amdgpu_device *adev, bool state) -{ - /* todo */ -} - static bool vi_read_disabled_bios(struct amdgpu_device *adev) { u32 bus_cntl; @@ -1435,7 +1430,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .read_register = &vi_read_register, .reset = &vi_asic_reset, .reset_method = &vi_asic_reset_method, - .set_vga_state = &vi_vga_set_state, .get_xclk = &vi_get_xclk, .set_uvd_clocks = &vi_set_uvd_clocks, .set_vce_clocks = &vi_set_vce_clocks, From ff6b11cc7263d4d6f17bc7b94e81ffcaae5fe107 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 19 May 2023 16:24:55 +0530 Subject: [PATCH 610/861] drm/amd/amdgpu: Fix errors & warnings in mmhub_v1_8.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below errors & warnings reported by checkpatch: ERROR: code indent should use tabs where possible WARNING: please, no space before tabs WARNING: please, no spaces at the start of a line WARNING: Prefer 'unsigned int' to bare use of 'unsigned' ERROR: space prohibited before that '++' (ctx:WxB) WARNING: Block comments use a trailing */ on a separate line Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 3648994724c2..8600e42434e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -271,7 +271,7 @@ static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) VMC_TAP_PTE_REQUEST_PHYSICAL, 0); } WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp); - } + } } static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev) @@ -328,7 +328,7 @@ static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - unsigned num_level, block_size; + unsigned int num_level, block_size; uint32_t tmp, inst_mask; int i, j; @@ -776,9 +776,10 @@ static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev, /* reset mmea ras err status */ mmea_cgtt_clk_cntl_addr_dist = regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL; mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS; - for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i ++) { + for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) { /* force clk branch on for response path - * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1 */ + * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1 + */ reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, regMMEA0_CGTT_CLK_CTRL, i * mmea_cgtt_clk_cntl_addr_dist); @@ -814,7 +815,8 @@ static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev, /* reset mm_cane ras err status * force clk branch on for response path - * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1 */ + * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1 + */ reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL); reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL, SOFT_OVERRIDE_ATRET, 1); From 
b3b0e016ec44d94db48a7d01b69570b5de37a31c Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 19 May 2023 10:34:50 +0530 Subject: [PATCH 611/861] drm/amdgpu: Fix uninitalized variable in jpeg_v4_0_3_is_idle & jpeg_v4_0_3_wait_for_idle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c:752:4: error: variable 'ret' is uninitialized when used here [-Werror,-Wuninitialized] ret &= ((RREG32_SOC15_OFFSET( ^~~ drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c:745:10: note: initialize the variable 'ret' to silence this warning bool ret; ^ = 0 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c:774:4: error: variable 'ret' is uninitialized when used here [-Werror,-Wuninitialized] ret &= SOC15_WAIT_ON_RREG_OFFSET( ^~~ drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c:767:9: note: initialize the variable 'ret' to silence this warning int ret; ^ = 0 2 errors generated. Cc: Luben Tuikov Cc: Alex Deucher Cc: Christian König Cc: James Zhu Cc: Leo Liu Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index ede15a3a4701..ce2b22f7e4e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -742,7 +742,7 @@ static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count) static bool jpeg_v4_0_3_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool ret; + bool ret = false; int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { @@ -764,7 +764,7 @@ static bool jpeg_v4_0_3_is_idle(void *handle) static int jpeg_v4_0_3_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret; + int ret = 0; int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { From 2ad00e753ae13b0c523a579fb04372787f77cce9 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 19 May 2023 10:44:41 +0530 Subject: [PATCH 612/861] drm/amdgpu: Fix uninitalized variable in kgd2kfd_device_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_device.c:613:4: error: variable 'num_xcd' is uninitialized when used here [-Werror,-Wuninitialized] num_xcd, kfd->adev->gfx.num_xcc_per_xcp); ^~~~~~~ include/linux/dev_printk.h:144:65: note: expanded from macro 'dev_err' dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__) ^~~~~~~~~~~ include/linux/dev_printk.h:110:23: note: expanded from macro 'dev_printk_index_wrap' _p_func(dev, fmt, ##__VA_ARGS__); \ ^~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_device.c:597:13: note: initialize the variable 'num_xcd' to silence this warning int num_xcd, partition_mode; ^ = 0 1 error generated. 
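Both of these fixes silence the same clang diagnostic class (-Wuninitialized / -Wsometimes-uninitialized): a variable whose first use is a read, either through a read-modify-write accumulator or on a path where no assignment dominates the use. A reduced, self-contained illustration (hypothetical code, not from the driver):

  /* Buggy shape: 'bool ret;' followed by 'ret &= ...' reads an
   * indeterminate value on the first loop iteration. Seeding the
   * accumulator is the fix; true is the identity for '&=', while
   * the driver patches pick 0/false to match their callers.
   */
  static bool example_all_idle(const bool *core_idle, int num_cores)
  {
          bool ret = true;
          int i;

          for (i = 0; i < num_cores; i++)
                  ret &= core_idle[i];

          return ret;
  }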
Cc: Luben Tuikov Cc: Alex Deucher Cc: Christian König Cc: Felix Kuehling Cc: Mukul Joshi Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 986543a000bf..e92b93b2c14c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -594,7 +594,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; - int num_xcd, partition_mode; + int partition_mode; int xcp_idx; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, @@ -609,8 +609,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd->num_nodes == 0) { dev_err(kfd_device, - "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", - num_xcd, kfd->adev->gfx.num_xcc_per_xcp); + "KFD num nodes cannot be 0, num_xcc_in_node: %d\n", + kfd->adev->gfx.num_xcc_per_xcp); goto out; } From 4c44a51c78447ae97a6d7ef27ffdb790c3f61cca Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 18 May 2023 12:38:22 -0400 Subject: [PATCH 613/861] drm/radeon: reintroduce radeon_dp_work_func content Put back the radeon_dp_work_func logic. It seems that handling DP RX interrupts is necessary to make some panels work. This was removed with the MST support, but it regresses some systems so add it back. While we are here, add the proper mutex locking. Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2567 Fixes: 01ad1d9c2888 ("drm/radeon: Drop legacy MST support") Reviewed-by: Lyude Paul Signed-off-by: Alex Deucher Cc: Lyude Paul --- drivers/gpu/drm/radeon/radeon_irq_kms.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 3377fbc71f65..c4dda908666c 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -99,6 +99,16 @@ static void radeon_hotplug_work_func(struct work_struct *work) static void radeon_dp_work_func(struct work_struct *work) { + struct radeon_device *rdev = container_of(work, struct radeon_device, + dp_work); + struct drm_device *dev = rdev->ddev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + + mutex_lock(&mode_config->mutex); + list_for_each_entry(connector, &mode_config->connector_list, head) + radeon_connector_hotplug(connector); + mutex_unlock(&mode_config->mutex); } /** From 73ade646c545feda7c5df9b9c78c5d011ce76463 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 18 May 2023 15:51:38 +0200 Subject: [PATCH 614/861] drm/amdgpu: stop including swiotlb.h amdgpu does not need swiotlb.h, so stop including it. 
Signed-off-by: Christoph Hellwig Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 509b26e9d0c4..0abad5f89421 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include From f36f2648f32c184ffc285a836b1ce3757e966925 Mon Sep 17 00:00:00 2001 From: Cruise Hung Date: Tue, 9 May 2023 21:36:35 +0800 Subject: [PATCH 615/861] drm/amd/display: Fix DMUB debugging print issue [Why] The DMUB diagnostic data was not printed out correctly. [How] Print the DMUB diagnostic data line by line. Reviewed-by: Nicholas Kazlauskas Acked-by: Tom Chung Signed-off-by: Cruise Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 12 +++ drivers/gpu/drm/amd/display/dc/dc.h | 2 + drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 103 +++++++------------ 3 files changed, 49 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index e664a77d05eb..9f8efee27721 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4832,6 +4832,18 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); } +/** + * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging + * + * @dc: [in] dc structure + * + * + */ +void dc_print_dmub_diagnostic_data(const struct dc *dc) +{ + dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); +} + /** * dc_disable_accelerated_mode - disable accelerated mode * @dc: dc structure diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 2dff1a5cf3b1..7fc087f85d39 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -2236,6 +2236,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, uint32_t hpd_int_enable); +void dc_print_dmub_diagnostic_data(const struct dc *dc); + /* DSC Interfaces */ #include "dc_dsc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 0319a30f2d5c..c52c40b16387 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -169,6 +169,7 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } @@ -797,74 +798,40 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) return; } - DC_LOG_DEBUG( - "DMCUB STATE\n" - " dmcub_version : %08x\n" - " scratch [0] : %08x\n" - " scratch [1] : %08x\n" - " scratch [2] : %08x\n" - " scratch [3] : %08x\n" - " scratch [4] : %08x\n" - " scratch [5] : %08x\n" - " scratch [6] : %08x\n" - " scratch [7] : %08x\n" - " scratch [8] : %08x\n" - " scratch [9] : %08x\n" - " scratch [10] : %08x\n" - " scratch [11] : %08x\n" - " scratch [12] : %08x\n" - " scratch [13] : %08x\n" - " scratch [14] : %08x\n" - " scratch [15] : %08x\n" - " pc : %08x\n" - " unk_fault_addr : %08x\n" - " inst_fault_addr : %08x\n" - " data_fault_addr : %08x\n" - " inbox1_rptr : %08x\n" - " 
inbox1_wptr : %08x\n" - " inbox1_size : %08x\n" - " inbox0_rptr : %08x\n" - " inbox0_wptr : %08x\n" - " inbox0_size : %08x\n" - " is_enabled : %d\n" - " is_soft_reset : %d\n" - " is_secure_reset : %d\n" - " is_traceport_en : %d\n" - " is_cw0_en : %d\n" - " is_cw6_en : %d\n", - diag_data.dmcub_version, - diag_data.scratch[0], - diag_data.scratch[1], - diag_data.scratch[2], - diag_data.scratch[3], - diag_data.scratch[4], - diag_data.scratch[5], - diag_data.scratch[6], - diag_data.scratch[7], - diag_data.scratch[8], - diag_data.scratch[9], - diag_data.scratch[10], - diag_data.scratch[11], - diag_data.scratch[12], - diag_data.scratch[13], - diag_data.scratch[14], - diag_data.scratch[15], - diag_data.pc, - diag_data.undefined_address_fault_addr, - diag_data.inst_fetch_fault_addr, - diag_data.data_write_fault_addr, - diag_data.inbox1_rptr, - diag_data.inbox1_wptr, - diag_data.inbox1_size, - diag_data.inbox0_rptr, - diag_data.inbox0_wptr, - diag_data.inbox0_size, - diag_data.is_dmcub_enabled, - diag_data.is_dmcub_soft_reset, - diag_data.is_dmcub_secure_reset, - diag_data.is_traceport_en, - diag_data.is_cw0_enabled, - diag_data.is_cw6_enabled); + DC_LOG_DEBUG("DMCUB STATE:"); + DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version); + DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]); + DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]); + DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]); + DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]); + DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]); + DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]); + DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]); + DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]); + DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]); + DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]); + DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]); + DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]); + DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]); + DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]); + DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]); + DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]); + DC_LOG_DEBUG(" pc : %08x", diag_data.pc); + DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr); + DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr); + DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr); + DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr); + DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr); + DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size); + DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr); + DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr); + DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size); + DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled); + DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset); + DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset); + DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en); + DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled); + DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled); } static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) From 75589226372ce5255ffade2ec6dea862338f7595 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 9 May 2023 09:38:29 -0400 Subject: [PATCH 616/861] drm/amd/display: disable dcn315 pixel 
rate crb when scaling The rough calculation does not account for scaling. Also, make 2 segments the minimum allowed per surface to avoid potential 0 detile with mpc/odm combine on such outputs. Reviewed-by: Ariel Bernstein Acked-by: Tom Chung Signed-off-by: Dmytro Laktyushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 42a0157fd813..c6bc2d603ab8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1666,6 +1666,14 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) if (!res_ctx->pipe_ctx[i].stream) continue; + /*Don't apply if scaling*/ + if (res_ctx->pipe_ctx[i].stream->src.width != res_ctx->pipe_ctx[i].stream->dst.width || + res_ctx->pipe_ctx[i].stream->src.height != res_ctx->pipe_ctx[i].stream->dst.height || + (res_ctx->pipe_ctx[i].top_pipe->plane_state && (res_ctx->pipe_ctx[i].top_pipe->plane_state->src_rect.width + != res_ctx->pipe_ctx[i].top_pipe->plane_state->dst_rect.width || + res_ctx->pipe_ctx[i].top_pipe->plane_state->src_rect.height + != res_ctx->pipe_ctx[i].top_pipe->plane_state->dst_rect.height))) + return false; /*Don't apply if MPO to avoid transition issues*/ if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state) return false; @@ -1715,10 +1723,15 @@ static int dcn315_populate_dml_pipes_from_context( /* Ceil to crb segment size */ int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( &context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB); + if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) { bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS; split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc); split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + /* Minimum 2 segments to allow mpc/odm combine if its used later */ + if (approx_det_segs_required_for_pstate < 2) + approx_det_segs_required_for_pstate = 2; if (split_required) approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2; pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate; From 6812d74803740100a0c422b9bc1fda947af4da6a Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 8 May 2023 16:08:37 -0400 Subject: [PATCH 617/861] drm/amd/display: Update SR watermarks for DCN314 [Why & How] Update parameters for SR watermarks for DCN314 Reviewed-by: Charlene Liu Acked-by: Tom Chung Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 4d5cd59f6433..2a0c696f5861 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -403,32 +403,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - 
.sr_exit_time_us = 16.5, - .sr_enter_plus_exit_time_us = 18.5, + .sr_exit_time_us = 30.0, + .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 16.5, - .sr_enter_plus_exit_time_us = 18.5, + .sr_exit_time_us = 30.0, + .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 16.5, - .sr_enter_plus_exit_time_us = 18.5, + .sr_exit_time_us = 30.0, + .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 16.5, - .sr_enter_plus_exit_time_us = 18.5, + .sr_exit_time_us = 30.0, + .sr_enter_plus_exit_time_us = 32.0, .valid = true, }, } From cd465a670087f94e62100622f9cbb894f524268a Mon Sep 17 00:00:00 2001 From: Alan Liu Date: Tue, 2 May 2023 17:54:50 +0800 Subject: [PATCH 618/861] drm/amd/display: Fix warning in disabling vblank irq [Why] During gpu-reset, we toggle vblank irq by calling dc_interrupt_set() instead of amdgpu_irq_get/put() because we don't want to change the irq source's refcount. However, we see the warning when vblank irq is enabled by dc_interrupt_set() during gpu-reset but disabled by amdgpu_irq_put() after gpu-reset. [How] Only in dm_gpureset_toggle_interrupts() we toggle vblank interrupts by calling dc_interrupt_set(). Apart from this we call dm_set_vblank() which uses amdgpu_irq_get/put() to operate vblank irq. Reviewed-by: Bhawanpreet Lakha Acked-by: Tom Chung Signed-off-by: Alan Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 21 ++++++++++++------- .../amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 16 +++----------- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4edbcc9e535b..f59cb4d659dc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2475,20 +2475,25 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, if (acrtc && state->stream_status[i].plane_count != 0) { irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; - DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", - acrtc->crtc_id, enable ? "en" : "dis", rc); if (rc) DRM_WARN("Failed to %s pflip interrupts\n", enable ? "enable" : "disable"); if (enable) { - rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base); - if (rc) - DRM_WARN("Failed to enable vblank interrupts\n"); - } else { - amdgpu_dm_crtc_disable_vblank(&acrtc->base); - } + if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) + rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); + } else + rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); + if (rc) + DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); + + irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + /* During gpu-reset we disable and then enable vblank irq, so + * don't use amdgpu_irq_get/put() to avoid refcount change. + */ + if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index e3762e806617..440fc0869a34 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -146,7 +146,6 @@ static void vblank_control_worker(struct work_struct *work) static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) { - enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); @@ -169,18 +168,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (rc) return rc; - if (amdgpu_in_reset(adev)) { - irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; - /* During gpu-reset we disable and then enable vblank irq, so - * don't use amdgpu_irq_get/put() to avoid refcount change. - */ - if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) - rc = -EBUSY; - } else { - rc = (enable) - ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) - : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); - } + rc = (enable) + ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) + : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); if (rc) return rc; From 38ff516bb00cd8e974c8b5e70ab6e1b354b8f424 Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Mon, 24 Apr 2023 11:39:20 -0400 Subject: [PATCH 619/861] drm/amd/display: lower dp link training message level [Why] some test apps report dp link training waring even dp training pass. there are 4 tries of lt within perform_link_training_with_retries. if lt pass within 4 tries, it will NOT be reated as lt failure. for each try of lt, if lt fails, current driver implementation prints message at warning level. this let people think dp lt does not work properly. [How] for 1st, 2nd and 3rd try of lt, print message at debug level. for the 4th try of lt, print message at warning level. 
Reviewed-by: Jerry Zuo Acked-by: Tom Chung Signed-off-by: Hersen Wu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../dc/link/protocols/link_dp_training.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 579fa222810d..e011df4bdaf2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1653,10 +1653,19 @@ bool perform_link_training_with_retries( break; } - DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", - __func__, link->link_index, (unsigned int)j + 1, attempts, - cur_link_settings.link_rate, cur_link_settings.lane_count, - cur_link_settings.link_spread, status); + if (j == (attempts - 1)) { + DC_LOG_WARNING( + "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, + cur_link_settings.link_rate, cur_link_settings.lane_count, + cur_link_settings.link_spread, status); + } else { + DC_LOG_HW_LINK_TRAINING( + "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, + cur_link_settings.link_rate, cur_link_settings.lane_count, + cur_link_settings.link_spread, status); + } dp_disable_link_phy(link, &pipe_ctx->link_res, signal); From 0ab720d506252a28983baabafa2605eb6c94b1d7 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 10 May 2023 15:27:19 -0400 Subject: [PATCH 620/861] drm/amd/display: fix dcn315 pixel rate crb scaling check fix dcn315 pixel rate crb scaling check error Reviewed-by: Charlene Liu Acked-by: Tom Chung Signed-off-by: Dmytro Laktyushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index c6bc2d603ab8..95fd3d087ea3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1669,10 +1669,10 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) /*Don't apply if scaling*/ if (res_ctx->pipe_ctx[i].stream->src.width != res_ctx->pipe_ctx[i].stream->dst.width || res_ctx->pipe_ctx[i].stream->src.height != res_ctx->pipe_ctx[i].stream->dst.height || - (res_ctx->pipe_ctx[i].top_pipe->plane_state && (res_ctx->pipe_ctx[i].top_pipe->plane_state->src_rect.width - != res_ctx->pipe_ctx[i].top_pipe->plane_state->dst_rect.width || - res_ctx->pipe_ctx[i].top_pipe->plane_state->src_rect.height - != res_ctx->pipe_ctx[i].top_pipe->plane_state->dst_rect.height))) + (res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->src_rect.width + != res_ctx->pipe_ctx[i].plane_state->dst_rect.width || + res_ctx->pipe_ctx[i].plane_state->src_rect.height + != res_ctx->pipe_ctx[i].plane_state->dst_rect.height))) return false; /*Don't apply if MPO to avoid transition issues*/ if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state) From 91b38ca1b331ef1af3b77e2ffdb41654e0fba127 Mon Sep 17 00:00:00 2001 From: Fangzhi Zuo 
Date: Wed, 10 May 2023 16:43:30 -0400 Subject: [PATCH 621/861] drm/amd/display: Have Payload Properly Created After Resume During the drm suspend sequence, the MST dc_sink is removed. When committing cached MST streams back in the drm resume sequence, the MST stream payload is not properly created and added into the payload table. After resume, the topology change is reprobed by removing existing streams first, which leads to no payload being found in the existing payload table, with the error below: "[drm] ERROR No payload for [MST PORT:] found in mst state" 1. In the encoder .atomic_check routine, remove the check for existence of dc_sink. 2. Bypass MST by checking the existence of the MST root port; dc_link_type cannot differentiate an MST port before the topology is rediscovered. Reviewed-by: Wayne Lin Acked-by: Tom Chung Signed-off-by: Fangzhi Zuo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f59cb4d659dc..c98c57deb5eb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2853,7 +2853,7 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->dc_link->type == dc_connection_mst_branch) + if (aconnector && aconnector->mst_root) continue; mutex_lock(&aconnector->hpd_lock); @@ -6776,7 +6776,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, int clock, bpp = 0; bool is_y420 = false; - if (!aconnector->mst_output_port || !aconnector->dc_sink) + if (!aconnector->mst_output_port) return 0; mst_port = aconnector->mst_output_port; From 3e8d74cb128fb1a4d56270ffbecea6056c55739a Mon Sep 17 00:00:00 2001 From: Saaem Rizvi Date: Tue, 9 May 2023 14:41:59 -0400 Subject: [PATCH 622/861] drm/amd/display: Trigger DIO FIFO resync on commit streams [WHY] Currently, there is an intermittent issue where a screen can either go blank or be corrupted. [HOW] To resolve the issue we trigger the ramping logic for the DIO FIFO so that it ramps back up to the correct speed.
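The resync trigger itself is small: read the current DENTIST display clock read-divider and write the same value back through the write-divider, which restarts the DCCG ramp and brings the DIO FIFO back up to speed. Condensed from the dccg314 hook added below:

  uint32_t dispclk_rdivider_value = 0;

  /* Re-arm the ramp by replaying the current divider value. */
  REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
  REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);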
Reviewed-by: Nicholas Kazlauskas Acked-by: Tom Chung Signed-off-by: Saaem Rizvi Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/dc/dce110/dce110_hw_sequencer.c | 3 +++ .../gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 4 ++- .../drm/amd/display/dc/dcn314/dcn314_dccg.c | 11 ++++++++ .../drm/amd/display/dc/dcn314/dcn314_dccg.h | 5 +++- .../drm/amd/display/dc/dcn314/dcn314_hwseq.c | 27 +++++++++++++++++++ .../drm/amd/display/dc/dcn314/dcn314_hwseq.h | 2 ++ .../drm/amd/display/dc/dcn314/dcn314_init.c | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 3 +++ .../amd/display/dc/inc/hw_sequencer_private.h | 2 ++ 9 files changed, 56 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 8d2460d06bce..1a0be40d125c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2291,6 +2291,9 @@ enum dc_status dce110_apply_ctx_to_hw( if (DC_OK != status) return status; + + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); } if (dc->fbc_compressor) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index 7bdc146f7cb5..c8602bcfa393 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -208,7 +208,9 @@ #define DCCG314_REG_FIELD_LIST(type) \ type DSCCLK3_DTO_PHASE;\ type DSCCLK3_DTO_MODULO;\ - type DSCCLK3_DTO_ENABLE; + type DSCCLK3_DTO_ENABLE;\ + type DENTIST_DISPCLK_RDIVIDER;\ + type DENTIST_DISPCLK_WDIVIDER; #define DCCG32_REG_FIELD_LIST(type) \ type DPSTREAMCLK0_EN;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index de7bfba2c179..e0e7d32bb1a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -45,6 +45,16 @@ #define DC_LOGGER \ dccg->ctx->logger +static void dccg314_trigger_dio_fifo_resync( + struct dccg *dccg) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + uint32_t dispclk_rdivider_value = 0; + + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); +} + static void dccg314_get_pixel_rate_div( struct dccg *dccg, uint32_t otg_inst, @@ -357,6 +367,7 @@ static const struct dccg_funcs dccg314_funcs = { .disable_dsc = dccg31_disable_dscclk, .enable_dsc = dccg31_enable_dscclk, .set_pixel_rate_div = dccg314_set_pixel_rate_div, + .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h index 90687a9e8fdd..8e07d3151f91 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h @@ -192,7 +192,10 @@ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\ - DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh) + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\ + 
DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, mask_sh),\ + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh) struct dccg *dccg314_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index f7a3e0d71d01..70fac2ebb757 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -390,6 +390,33 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) pix_per_cycle); } +void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context) +{ + uint8_t i; + struct pipe_ctx *pipe = NULL; + bool otg_disabled[MAX_PIPES] = {false}; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->top_pipe || pipe->prev_odm_pipe) + continue; + + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); + otg_disabled[i] = true; + } + } + + hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (otg_disabled[i]) + pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + } +} + void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on) { if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index 6d0b62503caa..559d71002e8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -41,6 +41,8 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx); +void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 7a28c7bb25d2..90be62c05822 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -152,6 +152,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = { .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, .calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values, .set_pixels_per_cycle = dcn314_set_pixels_per_cycle, + .resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio, }; void dcn314_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index ad6acd1b34e1..0b700b3d7d97 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -159,6 +159,9 @@ struct dccg_funcs { int otg_inst, int pixclk_khz); + void (*trigger_dio_fifo_resync)( + struct dccg *dccg); + void (*dpp_root_clock_control)( struct dccg *dccg, unsigned int dpp_inst, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index 4513544559be..4ca4192c1e12 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -160,6 +160,8 @@ struct hwseq_private_funcs {
 			unsigned int *k1_div,
 			unsigned int *k2_div);
 	void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
+	void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
+			struct dc_state *context);
 	bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
 #endif
 };

From c02b04633c4f4654331c53966cb937df1c73a9bb Mon Sep 17 00:00:00 2001
From: Daniel Miess
Date: Thu, 11 May 2023 09:12:09 -0400
Subject: [PATCH 623/861] drm/amd/display: Revert vblank change that causes
 null pointer crash

Revert commit 1a4bcdbea431 ("drm/amd/display: Fix possible underflow
for displays with large vblank") because it causes a regression.

Fixes: 1a4bcdbea431 ("drm/amd/display: Fix possible underflow for displays with large vblank")
Reviewed-by: Nicholas Kazlauskas
Acked-by: Tom Chung
Signed-off-by: Daniel Miess
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 .../amd/display/dc/dml/dcn314/dcn314_fpu.c    | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 554152371eb5..1d00eb9e73c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -33,7 +33,7 @@
 #include "dml/display_mode_vba.h"
 
 struct _vcs_dpi_ip_params_st dcn3_14_ip = {
-	.VBlankNomDefaultUS = 800,
+	.VBlankNomDefaultUS = 668,
 	.gpuvm_enable = 1,
 	.gpuvm_max_page_table_levels = 1,
 	.hostvm_enable = 1,
@@ -286,7 +286,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
 	struct resource_context *res_ctx = &context->res_ctx;
 	struct pipe_ctx *pipe;
 	bool upscaled = false;
-	const unsigned int max_allowed_vblank_nom = 1023;
+	bool isFreesyncVideo = false;
 
 	dc_assert_fp_enabled();
 
@@ -300,11 +300,16 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
 		pipe = &res_ctx->pipe_ctx[i];
 		timing = &pipe->stream->timing;
 
-		pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-		pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
-		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
-		pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
-		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
+		isFreesyncVideo = pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min;
+		isFreesyncVideo = isFreesyncVideo && pipe->stream->adjust.v_total_min > timing->v_total;
+
+		if (!isFreesyncVideo) {
+			pipes[pipe_cnt].pipe.dest.vblank_nom =
+				dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
+		} else {
+			pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+			pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
+		}
 
 		if (pipe->plane_state &&
 				(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||

From de231189e7bf1a38c0f889ec5f8911af473aa792 Mon Sep 17 00:00:00 2001
From: Daniel Miess
Date: Thu, 11 May 2023 10:51:27 -0400
Subject: [PATCH 624/861] drm/amd/display: Fix possible underflow for displays
 with large vblank

[Why]
Underflow observed when using a display with a large vblank
region and low refresh rate.

[How]
Simplify the calculation of vblank_nom.
Increase the value of VBlankNomDefaultUS to 800us.
Fix a null pointer dereference from the previous commit of this change.

Reviewed-by: Nicholas Kazlauskas
Acked-by: Tom Chung
Signed-off-by: Daniel Miess
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 .../amd/display/dc/dml/dcn314/dcn314_fpu.c    | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 1d00eb9e73c6..554152371eb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -33,7 +33,7 @@
 #include "dml/display_mode_vba.h"
 
 struct _vcs_dpi_ip_params_st dcn3_14_ip = {
-	.VBlankNomDefaultUS = 668,
+	.VBlankNomDefaultUS = 800,
 	.gpuvm_enable = 1,
 	.gpuvm_max_page_table_levels = 1,
 	.hostvm_enable = 1,
@@ -286,7 +286,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
 	struct resource_context *res_ctx = &context->res_ctx;
 	struct pipe_ctx *pipe;
 	bool upscaled = false;
-	bool isFreesyncVideo = false;
+	const unsigned int max_allowed_vblank_nom = 1023;
 
 	dc_assert_fp_enabled();
 
@@ -300,16 +300,11 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
 		pipe = &res_ctx->pipe_ctx[i];
 		timing = &pipe->stream->timing;
 
-		isFreesyncVideo = pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min;
-		isFreesyncVideo = isFreesyncVideo && pipe->stream->adjust.v_total_min > timing->v_total;
-
-		if (!isFreesyncVideo) {
-			pipes[pipe_cnt].pipe.dest.vblank_nom =
-				dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
-		} else {
-			pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-			pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
-		}
+		pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+		pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
+		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
+		pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
+		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
 
 		if (pipe->plane_state &&
 				(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||

From 7e60ab4eb3e4ba2adac46d737fdbbc5732bebd58 Mon Sep 17 00:00:00 2001
From: Alvin Lee
Date: Thu, 11 May 2023 15:25:01 -0400
Subject: [PATCH 625/861] drm/amd/display: Apply 60us prefetch for DCFCLK <=
 300Mhz

[Description]
- Previously we wanted to apply the extra 60us of prefetch only for the
  minimum DCFCLK (200Mhz), but DCFCLK can be calculated to be 201Mhz,
  which also underflows without the extra prefetch
- Instead, apply the extra 60us of prefetch for any DCFCLK freq <= 300Mhz

Reviewed-by: Nevenko Stupar
Reviewed-by: Jun Lei
Acked-by: Tom Chung
Signed-off-by: Alvin Lee
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 .../gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 4 ++--
 .../gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 958d27224f64..cbdfb762c10c 100644
---
a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -811,7 +811,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SwathHeightC[k], TWait, (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ || - v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ? + v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->DSTXAfterScaler[k], @@ -3315,7 +3315,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->swath_width_chroma_ub_this_state[k], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->TWait, - (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ? + (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index d98e36a9a09c..c4745d63039b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -53,7 +53,7 @@ #define BPP_BLENDED_PIPE 0xffffffff #define MEM_STROBE_FREQ_MHZ 1600 -#define MIN_DCFCLK_FREQ_MHZ 200 +#define DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ 300 #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0 struct display_mode_lib; From 25879d7b4986beba3f0d84762fe40d09fdc8b219 Mon Sep 17 00:00:00 2001 From: Qingqing Zhuo Date: Thu, 16 Mar 2023 09:05:58 -0400 Subject: [PATCH 626/861] drm/amd/display: Clean FPGA code in dc [Why] Drop dead code for Linux. 
[How] Remove all IS_FPGA_MAXIMUS_DC and IS_DIAG_DC Reviewed-by: Ariel Bernstein Acked-by: Tom Chung Signed-off-by: Qingqing Zhuo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../dc/clk_mgr/dce112/dce112_clk_mgr.c | 20 +- .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 10 +- .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 66 +++-- .../dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 24 +- .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 71 +++-- .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 17 +- .../display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 32 +-- .../display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 54 ++-- .../display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 42 ++- .../dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 38 ++- .../dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 40 ++- .../dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 57 ++-- .../display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 1 - drivers/gpu/drm/amd/display/dc/core/dc.c | 40 ++- .../gpu/drm/amd/display/dc/core/dc_stream.c | 30 --- drivers/gpu/drm/amd/display/dc/dc_helper.c | 6 +- drivers/gpu/drm/amd/display/dc/dc_types.h | 7 - .../drm/amd/display/dc/dce/dce_clock_source.c | 38 --- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 8 +- .../display/dc/dce110/dce110_hw_sequencer.c | 3 - .../display/dc/dce112/dce112_hw_sequencer.c | 3 - .../display/dc/dce120/dce120_hw_sequencer.c | 3 - .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 122 ++++----- .../gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 8 +- .../drm/amd/display/dc/dcn10/dcn10_resource.c | 5 +- .../drm/amd/display/dc/dcn20/dcn20_hwseq.c | 47 ++-- .../gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 4 - .../gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 7 +- .../drm/amd/display/dc/dcn20/dcn20_resource.c | 15 +- .../drm/amd/display/dc/dcn201/dcn201_hwseq.c | 73 ++--- .../drm/amd/display/dc/dcn201/dcn201_optc.c | 7 +- .../amd/display/dc/dcn201/dcn201_resource.c | 5 +- .../gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 4 - .../drm/amd/display/dc/dcn21/dcn21_resource.c | 10 +- .../drm/amd/display/dc/dcn30/dcn30_hwseq.c | 54 +--- .../gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 4 - .../gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 7 +- .../drm/amd/display/dc/dcn30/dcn30_resource.c | 8 +- .../amd/display/dc/dcn301/dcn301_resource.c | 10 +- .../amd/display/dc/dcn302/dcn302_resource.c | 5 +- .../amd/display/dc/dcn303/dcn303_resource.c | 5 +- .../drm/amd/display/dc/dcn31/dcn31_hwseq.c | 98 +++---- .../gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 4 - .../drm/amd/display/dc/dcn31/dcn31_resource.c | 17 +- .../drm/amd/display/dc/dcn314/dcn314_init.c | 4 - .../amd/display/dc/dcn314/dcn314_resource.c | 10 +- .../amd/display/dc/dcn315/dcn315_resource.c | 17 +- .../amd/display/dc/dcn316/dcn316_resource.c | 17 +- .../gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 4 - .../drm/amd/display/dc/dcn32/dcn32_resource.c | 13 +- .../amd/display/dc/dcn321/dcn321_resource.c | 13 +- .../amd/display/dc/dml/dcn301/dcn301_fpu.c | 68 +++-- .../drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 221 +++++++-------- .../dc/dml/dcn31/display_rq_dlg_calc_31.c | 8 - .../amd/display/dc/dml/dcn314/dcn314_fpu.c | 6 +- .../dc/dml/dcn314/display_rq_dlg_calc_314.c | 8 - .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 142 +++++----- .../amd/display/dc/dml/dcn321/dcn321_fpu.c | 142 +++++----- .../drm/amd/display/dc/dml/display_mode_lib.c | 1 - .../drm/amd/display/dc/dml/display_mode_lib.h | 1 - .../display/dc/link/hwss/link_hwss_hpo_dp.c | 83 +----- .../gpu/drm/amd/display/dc/link/link_dpms.c | 254 +++++++++--------- .../dc/link/protocols/link_dp_capability.c | 3 +- 63 files changed, 811 
insertions(+), 1333 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 934e6423dc1a..1f36ad8a7de4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -111,12 +111,10 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) bp->funcs->set_dce_clock(bp, &dce_clk_params); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_clock / 1000 / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_clock / 1000 / 7); } clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; @@ -153,12 +151,10 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_clock) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_clock / 1000 / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr->dfs_bypass_disp_clk != actual_clock) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_clock / 1000 / 7); } clk_mgr->dfs_bypass_disp_clk = actual_clock; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 450eaead4f20..89b79dd39628 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -135,12 +135,10 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di VBIOSSMC_MSG_SetDispclkFreq, khz_to_mhz_ceil(requested_dispclk_khz)); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_dispclk_set_mhz / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_dispclk_set_mhz / 7); } return actual_dispclk_set_mhz * 1000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 650f3b4b562e..c435f7632e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -531,6 +531,11 @@ void dcn20_clk_mgr_construct( struct pp_smu_funcs *pp_smu, struct dccg *dccg) { + int dprefclk_did; + int target_div; + uint32_t pll_req_reg; + struct fixed31_32 pll_req; + clk_mgr->base.ctx = ctx; clk_mgr->pp_smu = pp_smu; clk_mgr->base.funcs = &dcn2_funcs; @@ -547,42 +552,34 @@ void dcn20_clk_mgr_construct( clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; + /* DFS Slice 2 should be used for DPREFCLK */ + dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL); + /* Convert DPREFCLK DFS Slice DID to actual 
divider */ + target_div = dentist_get_divider_from_did(dprefclk_did); + /* get FbMult value */ + pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ); + + /* set up a fixed-point number + * this works because the int part is on the right edge of the register + * and the frac part is on the left edge + */ + + pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int); + pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac; + + /* multiply by REFCLK period */ + pll_req = dc_fixpt_mul_int(pll_req, 100000); + + /* integer part is now VCO frequency in kHz */ + clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); + + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 3850000; - } else { - /* DFS Slice 2 should be used for DPREFCLK */ - int dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL); - /* Convert DPREFCLK DFS Slice DID to actual divider*/ - int target_div = dentist_get_divider_from_did(dprefclk_did); - - /* get FbMult value */ - uint32_t pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ); - struct fixed31_32 pll_req; - - /* set up a fixed-point number - * this works because the int part is on the right edge of the register - * and the frac part is on the left edge - */ - - pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int); - pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac; - - /* multiply by REFCLK period */ - pll_req = dc_fixpt_mul_int(pll_req, 100000); - - /* integer part is now VCO frequency in kHz */ - clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3850000; - - /* Calculate the DPREFCLK in kHz.*/ - clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; - } + /* Calculate the DPREFCLK in kHz.*/ + clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; //Integrated_info table does not exist on dGPU projects so should not be referenced //anywhere in code for dGPUs. //Also there is no plan for now that DFS BYPASS will be used on NV10/12/14. 
@@ -590,4 +587,3 @@ void dcn20_clk_mgr_construct( dce_clock_read_ss_info(clk_mgr); } - diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index 811720749faf..694fe4271b4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -190,23 +190,17 @@ void dcn201_clk_mgr_construct(struct dc_context *ctx, clk_mgr->dprefclk_ss_divider = 1000; clk_mgr->ss_on_dprefclk = false; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn201_funcs.update_clocks = dcn2_update_clocks_fpga; + clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT); + clk_mgr->base.dprefclk_khz *= 100; + + if (clk_mgr->base.dprefclk_khz == 0) clk_mgr->base.dprefclk_khz = 600000; + + REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz); + clk_mgr->base.dentist_vco_freq_khz *= 100000; + + if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 3000000; - } else { - clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT); - clk_mgr->base.dprefclk_khz *= 100; - - if (clk_mgr->base.dprefclk_khz == 0) - clk_mgr->base.dprefclk_khz = 600000; - - REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz); - clk_mgr->base.dentist_vco_freq_khz *= 100000; - - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3000000; - } if (!debug->disable_dfs_bypass && bp->integrated_info) if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index bd9fd0b54f46..0c6a4ab72b1d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -705,6 +705,7 @@ void rn_clk_mgr_construct( struct dpm_clocks clock_table = { 0 }; enum pp_smu_status status = 0; int is_green_sardine = 0; + struct clk_log_info log_info = {0}; #if defined(CONFIG_DRM_AMD_DC_FP) is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev); @@ -725,48 +726,41 @@ void rn_clk_mgr_construct( clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; + clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr); + + /* SMU Version 55.51.0 and up no longer have an issue + * that needs to limit minimum dispclk */ + if (clk_mgr->smu_ver >= SMU_VER_55_51_0) + debug->min_disp_clk_khz = 0; + + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); + + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 3600000; - } else { - struct clk_log_info log_info = {0}; - clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr); - - /* SMU Version 55.51.0 and up no longer have an issue - * that needs to limit minimum dispclk */ - if (clk_mgr->smu_ver >= SMU_VER_55_51_0) - debug->min_disp_clk_khz = 0; - - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - 
clk_mgr->base.dentist_vco_freq_khz = 3600000; - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { - if (clk_mgr->periodic_retraining_disabled) { - rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; - } else { - if (is_green_sardine) - rn_bw_params.wm_table = lpddr4_wm_table_gs; - else - rn_bw_params.wm_table = lpddr4_wm_table_rn; - } + if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { + if (clk_mgr->periodic_retraining_disabled) { + rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; } else { if (is_green_sardine) - rn_bw_params.wm_table = ddr4_wm_table_gs; - else { - if (ctx->dc->config.is_single_rank_dimm) - rn_bw_params.wm_table = ddr4_1R_wm_table_rn; - else - rn_bw_params.wm_table = ddr4_wm_table_rn; - } + rn_bw_params.wm_table = lpddr4_wm_table_gs; + else + rn_bw_params.wm_table = lpddr4_wm_table_rn; + } + } else { + if (is_green_sardine) + rn_bw_params.wm_table = ddr4_wm_table_gs; + else { + if (ctx->dc->config.is_single_rank_dimm) + rn_bw_params.wm_table = ddr4_1R_wm_table_rn; + else + rn_bw_params.wm_table = ddr4_wm_table_rn; } - /* Saved clocks configured at boot for debug purposes */ - rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); } + /* Saved clocks configured at boot for debug purposes */ + rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); clk_mgr->base.dprefclk_khz = 600000; dce_clock_read_ss_info(clk_mgr); @@ -786,9 +780,8 @@ void rn_clk_mgr_construct( } } - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { - /* enable powerfeatures when displaycount goes to 0 */ + /* enable powerfeatures when displaycount goes to 0 */ + if (clk_mgr->smu_ver >= 0x00371500) rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); - } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 27fbe906682f..8c9d45e5b13b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -147,17 +147,14 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis VBIOSSMC_MSG_SetDispclkFreq, khz_to_mhz_ceil(requested_dispclk_khz)); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { - if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_dispclk_set_mhz / 7); - } + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_dispclk_set_mhz / 7); } // pmfw always set clock more than or equal requested clock - if (!IS_DIAG_DC(dc->ctx->dce_environment)) - ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz)); + ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz)); return actual_dispclk_set_mhz * 1000; } @@ -221,15 +218,13 @@ void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phy int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz) { int actual_dppclk_set_mhz = -1; - struct dc *dc = clk_mgr->base.ctx->dc; actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDppclkFreq, khz_to_mhz_ceil(requested_dpp_khz)); - if (!IS_DIAG_DC(dc->ctx->dce_environment)) - ASSERT(actual_dppclk_set_mhz 
>= khz_to_mhz_ceil(requested_dpp_khz)); + ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz)); return actual_dppclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 3908e7cfd6cb..3271c8c7905d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -521,6 +521,8 @@ void dcn3_clk_mgr_construct( struct pp_smu_funcs *pp_smu, struct dccg *dccg) { + struct clk_state_registers_and_bypass s = { 0 }; + clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn3_funcs; clk_mgr->regs = &clk_mgr_regs; @@ -537,27 +539,19 @@ void dcn3_clk_mgr_construct( clk_mgr->base.dprefclk_khz = 730000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.funcs = &dcn3_fpga_funcs; + /* integer part is now VCO frequency in kHz */ + clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr); + + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.dentist_vco_freq_khz == 0) clk_mgr->base.dentist_vco_freq_khz = 3650000; + /* Convert dprefclk units from MHz to KHz */ + /* Value already divided by 10, some resolution lost */ - } else { - struct clk_state_registers_and_bypass s = { 0 }; - - /* integer part is now VCO frequency in kHz */ - clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dentist_vco_freq_khz == 0) - clk_mgr->base.dentist_vco_freq_khz = 3650000; - /* Convert dprefclk units from MHz to KHz */ - /* Value already divided by 10, some resolution lost */ - - /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */ - //ASSERT(s.dprefclk != 0); - if (s.dprefclk != 0) - clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; - } + /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */ + //ASSERT(s.dprefclk != 0); + if (s.dprefclk != 0) + clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; clk_mgr->dfs_bypass_enabled = false; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 01383aac6b41..a5489fe6875f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -117,7 +117,7 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base, display_count = vg_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ - if (display_count == 0 && !IS_DIAG_DC(dc->ctx->dce_environment)) { + if (display_count == 0) { union display_idle_optimization_u idle_info = { 0 }; idle_info.idle_info.df_request_disabled = 1; @@ -151,10 +151,8 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -664,6 +662,7 @@ void vg_clk_mgr_construct( struct dccg *dccg) { struct smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &vg_funcs; @@ -703,32 +702,25 @@ void vg_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - vg_funcs.update_clocks = dcn2_update_clocks_fpga; + clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base); + + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; + + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.base.dentist_vco_freq_khz == 0) clk_mgr->base.base.dentist_vco_freq_khz = 3600000; + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + vg_bw_params.wm_table = lpddr5_wm_table; } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base); - - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; - - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.base.dentist_vco_freq_khz == 0) - clk_mgr->base.base.dentist_vco_freq_khz = 3600000; - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - vg_bw_params.wm_table = lpddr5_wm_table; - } else { - vg_bw_params.wm_table = ddr4_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + vg_bw_params.wm_table = ddr4_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); @@ -746,12 +738,6 @@ void vg_clk_mgr_construct( if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, smu_dpm_clks.dpm_clks); -/* - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->base.smu_ver) { - enable powerfeatures when displaycount goes to 0 - dcn301_smu_enable_phy_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); - } -*/ } void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 3c743cd3d3f0..7ccd96959256 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -205,10 +205,8 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -672,6 +670,7 @@ void dcn31_clk_mgr_construct( struct dccg *dccg) { struct dcn31_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn31_funcs; @@ -711,29 +710,22 @@ void dcn31_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; + clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base); + + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; + + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn31_bw_params.wm_table = lpddr5_wm_table; } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base); - - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; - - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn31_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn31_bw_params.wm_table = ddr5_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); - + dcn31_bw_params.wm_table = ddr5_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 2a0c696f5861..2f7c8996b19d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -241,10 +241,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -724,6 +722,7 @@ void dcn314_clk_mgr_construct( struct dccg *dccg) { struct dcn314_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn314_funcs; @@ -763,29 +762,22 @@ void dcn314_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - } else { - struct clk_log_info log_info = {0}; + clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base); - clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base); + if (clk_mgr->base.smu_ver) + clk_mgr->base.smu_present = true; - if (clk_mgr->base.smu_ver) - clk_mgr->base.smu_present = true; + /* TODO: Check we get what we expect during bringup */ + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - /* TODO: Check we get what we expect during bringup */ - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) + dcn314_bw_params.wm_table = lpddr5_wm_table; + else + dcn314_bw_params.wm_table = ddr5_wm_table; - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) - dcn314_bw_params.wm_table = lpddr5_wm_table; - else - dcn314_bw_params.wm_table = ddr5_wm_table; - - /* Saved clocks configured at boot for debug purposes */ - dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); - - } + /* Saved clocks configured at boot for debug purposes */ + dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index 300c6a5872d0..b2c4f97afc8b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -184,12 +184,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) - new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; - if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) - new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; - } + if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) + new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; + if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) + new_clocks->dispclk_khz = MIN_DPP_DISP_CLK; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -600,6 +598,7 @@ void dcn315_clk_mgr_construct( struct dccg *dccg) { struct dcn315_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn315_funcs; @@ -639,26 +638,19 @@ void dcn315_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; + clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base); + + if (clk_mgr->base.smu_ver > 0) + clk_mgr->base.smu_present = true; + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn315_bw_params.wm_table = lpddr5_wm_table; } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base); - - if (clk_mgr->base.smu_ver > 0) - clk_mgr->base.smu_present = true; - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn315_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn315_bw_params.wm_table = ddr5_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); - + dcn315_bw_params.wm_table = ddr5_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 538126cefd4d..d7de756301cf 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -207,12 +207,10 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, } // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - if (new_clocks->dppclk_khz < 100000) - new_clocks->dppclk_khz = 100000; - if (new_clocks->dispclk_khz < 100000) - new_clocks->dispclk_khz = 100000; - } + if (new_clocks->dppclk_khz < 100000) + new_clocks->dppclk_khz = 100000; + if (new_clocks->dispclk_khz < 100000) + new_clocks->dispclk_khz = 100000; if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -616,6 +614,7 @@ void dcn316_clk_mgr_construct( struct dccg *dccg) { struct dcn316_smu_dpm_clks smu_dpm_clks = { 0 }; + struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn316_funcs; @@ -655,35 +654,27 @@ void dcn316_clk_mgr_construct( ASSERT(smu_dpm_clks.dpm_clks); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - clk_mgr->base.base.funcs = &dcn3_fpga_funcs; - clk_mgr->base.base.dentist_vco_freq_khz = 2500000; + clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base); + + if (clk_mgr->base.smu_ver > 0) + clk_mgr->base.smu_present = true; + + // Skip this for now as it did not work on DCN315, renable during bring up + clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + + /* in case we don't get a value from the register, use default */ + if (clk_mgr->base.base.dentist_vco_freq_khz == 0) + clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */ + + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { + dcn316_bw_params.wm_table = lpddr5_wm_table; } else { - struct clk_log_info log_info = {0}; - - clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base); - - if (clk_mgr->base.smu_ver > 0) - clk_mgr->base.smu_present = true; - - // Skip this for now as it did not work on DCN315, renable during bring up - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.base.dentist_vco_freq_khz == 0) - clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */ - - - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn316_bw_params.wm_table = lpddr5_wm_table; - } else { - dcn316_bw_params.wm_table = ddr4_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, - &clk_mgr->base.base, &log_info); - + dcn316_bw_params.wm_table = ddr4_wm_table; } + /* Saved clocks configured at boot for debug purposes */ + dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, + &clk_mgr->base.base, &log_info); clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 1df623b298a9..20bff6a346ba 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -941,7 +941,6 @@ void dcn32_clk_mgr_construct( clk_mgr->base.clks.ref_dtbclk_khz = 268750; } - /* integer part is now VCO frequency in kHz */ clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 9f8efee27721..af929ee11af5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3588,29 +3588,27 @@ static void commit_planes_for_stream(struct dc *dc, } } - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - for (i = 0; i < surface_count; i++) { - struct dc_plane_state *plane_state = srf_updates[i].surface; - /*set logical flag for lock/unlock use*/ - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - if (!pipe_ctx->plane_state) - continue; - if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) - continue; - pipe_ctx->plane_state->triplebuffer_flips = false; - if (update_type == UPDATE_TYPE_FAST && - dc->hwss.program_triplebuffer != NULL && - !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { - /*triple buffer for VUpdate only*/ - pipe_ctx->plane_state->triplebuffer_flips = true; - } - } - if (update_type == UPDATE_TYPE_FULL) { - /* force vsync flip when reconfiguring pipes to prevent underflow */ - plane_state->flip_immediate = false; + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + /*set logical flag for lock/unlock use*/ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + if (!pipe_ctx->plane_state) + continue; + if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + continue; + pipe_ctx->plane_state->triplebuffer_flips = false; + if (update_type == UPDATE_TYPE_FAST && + dc->hwss.program_triplebuffer != NULL && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; } } + if (update_type == UPDATE_TYPE_FULL) { + /* force vsync flip when reconfiguring pipes to prevent underflow */ + plane_state->flip_immediate = false; + } } // Update Type FULL, Surface updates diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 72b261ad9587..0d3ec50b1385 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -490,25 +490,6 @@ bool dc_stream_add_writeback(struct dc *dc, struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; dwb->otg_inst = stream_status->primary_otg_inst; } - if (IS_DIAG_DC(dc->ctx->dce_environment)) { - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { - dm_error("DC: update_bandwidth failed!\n"); - return false; - } - - /* enable writeback */ - if (dc->hwss.enable_writeback) { - struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; - - if (dwb->funcs->is_enabled(dwb)) { - /* writeback pipe already enabled, only need to update */ - dc->hwss.update_writeback(dc, wb_info, dc->current_state); - } else { - /* Enable writeback pipe from scratch*/ - dc->hwss.enable_writeback(dc, wb_info, dc->current_state); - } - } - } return true; } @@ -553,17 +534,6 @@ bool dc_stream_remove_writeback(struct dc *dc, } stream->num_wb_info = j; - if (IS_DIAG_DC(dc->ctx->dce_environment)) { - /* recalculate and apply DML parameters */ - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { - dm_error("DC: update_bandwidth failed!\n"); - return false; - } - - /* disable writeback */ - if (dc->hwss.disable_writeback) - dc->hwss.disable_writeback(dc, dwb_pipe_inst); - } return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 3db7a2e11af5..3907eeff560c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ 
b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -464,8 +464,7 @@ void generic_reg_wait(const struct dc_context *ctx, field_value = get_reg_field_value_ex(reg_val, mask, shift); if (field_value == condition_value) { - if (i * delay_between_poll_us > 1000 && - !IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) + if (i * delay_between_poll_us > 1000) DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n", delay_between_poll_us * i / 1000, func_name, line); @@ -477,8 +476,7 @@ void generic_reg_wait(const struct dc_context *ctx, delay_between_poll_us, time_out_num_tries, func_name, line); - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - BREAK_TO_DEBUGGER(); + BREAK_TO_DEBUGGER(); } void generic_write_indirect_reg(const struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 150c19286d67..6b4731b5e975 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -69,13 +69,6 @@ enum dce_environment { DCE_ENV_VIRTUAL_HW }; -/* Note: use these macro definitions instead of direct comparison! */ -#define IS_FPGA_MAXIMUS_DC(dce_environment) \ - (dce_environment == DCE_ENV_FPGA_MAXIMUS) - -#define IS_DIAG_DC(dce_environment) \ - (IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG)) - struct dc_perf_trace { unsigned long read_count; unsigned long write_count; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 462c7a3ec3cc..ed8936405dfa 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -920,25 +920,6 @@ static bool dce112_program_pix_clk( struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; - if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { - unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - unsigned dp_dto_ref_100hz = 7000000; - unsigned clock_100hz = pll_settings->actual_pix_clk_100hz; - - /* Set DTO values: phase = target clock, modulo = reference clock */ - REG_WRITE(PHASE[inst], clock_100hz); - REG_WRITE(MODULO[inst], dp_dto_ref_100hz); - - /* Enable DTO */ - if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) - REG_UPDATE_2(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1, - PIPE0_DTO_SRC_SEL, 1); - else - REG_UPDATE(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1); - return true; - } /* First disable SS * ATOMBIOS will enable by default SS on PLL for DP, * do not disable it here @@ -1015,25 +996,6 @@ static bool dcn31_program_pix_clk( REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); } else { - if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { - unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - unsigned dp_dto_ref_100hz = 7000000; - unsigned clock_100hz = pll_settings->actual_pix_clk_100hz; - - /* Set DTO values: phase = target clock, modulo = reference clock */ - REG_WRITE(PHASE[inst], clock_100hz); - REG_WRITE(MODULO[inst], dp_dto_ref_100hz); - - /* Enable DTO */ - if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) - REG_UPDATE_2(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1, - PIPE0_DTO_SRC_SEL, 1); - else - REG_UPDATE(PIXEL_RATE_CNTL[inst], - DP_DTO0_ENABLE, 1); - return true; - } if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL) REG_UPDATE(PIXEL_RATE_CNTL[inst], diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index e74266cc0098..63009db8b5a7 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -1093,11 +1093,9 @@ static void dcn21_dmcu_construct( dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); - if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); - dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); - dmcu_dce->base.psp_version = psp_version; - } + psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); + dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); + dmcu_dce->base.psp_version = psp_version; } struct dmcu *dce_dmcu_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 1a0be40d125c..c6fe2c00aedb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -209,9 +209,6 @@ static bool dce110_enable_display_power_gating( struct dc_context *ctx = dc->ctx; unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c index 19873ee1f78d..690caaaff019 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c @@ -120,9 +120,6 @@ static bool dce112_enable_display_power_gating( enum bp_pipe_control_action cntl; struct dc_context *ctx = dc->ctx; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index d4afe6c824d2..45e08c4d5861 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -159,9 +159,6 @@ static bool dce120_enable_display_power_gating( enum bp_pipe_control_action cntl; struct dc_context *ctx = dc->ctx; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - return true; - if (power_gating == PIPE_GATING_CONTROL_INIT) cntl = ASIC_PIPE_INIT; else if (power_gating == PIPE_GATING_CONTROL_ENABLE) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 905246a2ece4..13b4e5118459 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1012,31 +1012,29 @@ static void dcn10_reset_back_end_for_pipe( return; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - link = pipe_ctx->stream->link; - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. 
- */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); + link = pipe_ctx->stream->link; + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } } @@ -1499,28 +1497,6 @@ void dcn10_init_hw(struct dc *dc) if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init) dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) hws->funcs.disable_vga(dc->hwseq); @@ -1532,23 +1508,21 @@ void dcn10_init_hw(struct dc *dc) res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { + if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + 
&res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); @@ -3070,15 +3044,13 @@ void dcn10_prepare_bandwidth( if (dc->debug.sanity_checks) hws->funcs.verify_allow_pstate_change_high(dc); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (context->stream_count == 0) - context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; + if (context->stream_count == 0) + context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - false); - } + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + false); dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, @@ -3110,15 +3082,13 @@ void dcn10_optimize_bandwidth( if (dc->debug.sanity_checks) hws->funcs.verify_allow_pstate_change_high(dc); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (context->stream_count == 0) - context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; + if (context->stream_count == 0) + context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - true); - } + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index c9e53dc49c92..e1975991e075 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -653,11 +653,9 @@ void optc1_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - /* Should be fast, status does not update on maximus */ - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 21ec1ba5ed75..a0625209c86d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1651,9 +1651,8 @@ static bool dcn10_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto fail; + &res_create_funcs)) + goto fail; dcn10_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index e74c3ce561ab..b0a13eb8318c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2480,36 +2480,31 @@ static void dcn20_reset_back_end_for_pipe( return; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. 
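 * (Same de-indentation pattern as dcn10: the environment guard around
 * this block is dropped, dcn10_prepare_bandwidth() and
 * dcn10_optimize_bandwidth() now always zero phyclk for stream-less
 * contexts and call dc->clk_mgr->funcs->update_clocks(), and
 * optc1_lock() polls UPDATE_LOCK_STATUS unconditionally since the
 * "status does not update on maximus" exception no longer exists.)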
- */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); - /* free acquired resources */ - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } } - else if (pipe_ctx->stream_res.dsc) { - dc->link_srv->set_dsc_enable(pipe_ctx, false); - } /* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 4192c522e59a..e4b44e691ce6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -145,8 +145,4 @@ void dcn20_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn20_funcs; dc->hwseq->funcs = dcn20_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index a08c335b7383..e0edc163d767 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -391,10 +391,9 @@ void optc2_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); } void optc2_triplebuffer_unlock(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 1d8c5805ef20..7dcae3183e07 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2488,15 +2488,9 @@ static bool dcn20_resource_construct( dc->caps.dp_hdmi21_pcon_support = true; - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = 
debug_defaults_drv; - } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - pool->base.pipe_count = 4; - pool->base.mpcc_count = pool->base.pipe_count; - dc->debug = debug_defaults_diags; - } else { - dc->debug = debug_defaults_diags; - } + //dcn2.0x dc->work_arounds.dedcn20_305_wa = true; @@ -2734,9 +2728,8 @@ static bool dcn20_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn20_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c index 75472d53ff52..9e027db6d752 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -231,52 +231,39 @@ void dcn201_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); - REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); + hws->funcs.bios_golden_init(dc); - hws->funcs.dccg_init(hws); + if (dc->ctx->dc_bios->fw_info_valid) { + res_pool->ref_clocks.xtalin_clock_inKhz = + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); - } else { - hws->funcs.bios_golden_init(dc); + if (res_pool->dccg && res_pool->hubbub) { + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); - if (dc->ctx->dc_bios->fw_info_valid) { - res_pool->ref_clocks.xtalin_clock_inKhz = - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); - - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } - } - } else - ASSERT_CRITICAL(false); - for (i = 0; i < dc->link_count; i++) { - /* Power up AND update implementation according to the - * required signal (which may be different from the - * default signal on connector). 
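 * (dcn201_init_hw() loses its whole FPGA register path (RBBMIF
 * timeout disables, dccg_init, global-timer setup, REFCLK_CNTL), so
 * bios_golden_init() plus the DCCG/DCHUB reference-clock query is the
 * only remaining init flow, and link encoder hw_init() runs
 * unconditionally below.)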
- */ - struct dc_link *link = dc->links[i]; - - link->link_enc->funcs->hw_init(link->link_enc); + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } - if (hws->fb_offset.quad_part == 0) - read_mmhub_vm_setup(hws); + } else + ASSERT_CRITICAL(false); + for (i = 0; i < dc->link_count; i++) { + /* Power up AND update implementation according to the + * required signal (which may be different from the + * default signal on connector). + */ + struct dc_link *link = dc->links[i]; + + link->link_enc->funcs->hw_init(link->link_enc); } + if (hws->fb_offset.quad_part == 0) + read_mmhub_vm_setup(hws); /* Blank pixel data with OPP DPG */ for (i = 0; i < res_pool->timing_generator_count; i++) { @@ -362,10 +349,6 @@ void dcn201_init_hw(struct dc *dc) tg->funcs->tg_init(tg); } - /* end of FPGA. Below if real ASIC */ - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - return; - for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c index 730875dfd8b4..70fcbec03fb6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c @@ -55,10 +55,9 @@ static void optc201_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); } static void optc201_triplebuffer_unlock(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 6ea70da28aaa..212c475d95cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -1272,9 +1272,8 @@ static bool dcn201_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
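/*
 * Every resource_construct() call site in this patch collapses the
 * same ternary; a sketch of the resulting shape (dcn10 uses "goto
 * fail" rather than "goto create_fail"):
 *
 *	if (!resource_construct(num_virtual_links, dc, &pool->base,
 *			&res_create_funcs))
 *		goto create_fail;
 *
 * which leaves res_create_maximus_funcs without callers in these
 * files.
 */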
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn201_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 8b58ce1db035..f024157bd6eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -151,8 +151,4 @@ void dcn21_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn21_funcs; dc->hwseq->funcs = dcn21_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 19aaa557b2db..76268a7b7934 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1503,11 +1503,6 @@ static bool dcn21_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - pool->base.pipe_count = 4; - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1721,9 +1716,8 @@ static bool dcn21_resource_construct( } if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; dcn21_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 3303c9aae068..ad4e1023b00e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -330,10 +330,6 @@ void dcn30_enable_writeback( DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\ __func__, wb_info->dwb_pipe_inst,\ wb_info->mpcc_inst); - if (IS_DIAG_DC(dc->ctx->dce_environment)) { - /*till diags switch to warmup interface*/ - dcn30_mmhubbub_warmup(dc, 1, wb_info); - } /* Update writeback pipe */ dcn30_set_writeback(dc, wb_info, context); @@ -447,28 +443,6 @@ void dcn30_init_hw(struct dc *dc) if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) { hws->funcs.bios_golden_init(dc); hws->funcs.disable_vga(dc->hwseq); @@ -491,23 +465,21 @@ void dcn30_init_hw(struct dc *dc) res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { + if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - 
&res_pool->ref_clocks.dccg_ref_clock_inKhz); + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 18e94d8ae54f..3d19acaa12f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -151,8 +151,4 @@ void dcn30_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn30_funcs; dc->hwseq->funcs = dcn30_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 34b08d90dc1d..c6f5f3df8061 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -55,10 +55,9 @@ void optc3_triplebuffer_lock(struct timing_generator *optc) REG_SET(OTG_MASTER_UPDATE_LOCK, 0, OTG_MASTER_UPDATE_LOCK, 1); - if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 67a34cda3774..cd94b5f5fdb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -2376,10 +2376,7 @@ static bool dcn30_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2577,8 +2574,7 @@ static bool dcn30_resource_construct( /* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
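/*
 * Behavioral note for the resource constructors: the deleted
 * DCE_ENV_FPGA_MAXIMUS branch and the deleted final else both
 * assigned debug_defaults_diags, so from here on only
 * DCE_ENV_PRODUCTION_DRV explicitly initializes dc->debug (with
 * debug_defaults_drv); other environments presumably keep the
 * zero-initialized defaults.
 */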
- &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 5ac2a272c380..1cf84a086fec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1513,10 +1513,7 @@ static bool dcn301_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -1710,9 +1707,8 @@ static bool dcn301_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn301_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 9f93c43115ba..efd98d64588d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -1309,8 +1309,6 @@ static bool dcn302_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1489,8 +1487,7 @@ static bool dcn302_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, pool, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 7f72ef882ca4..fcd126602178 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -1232,8 +1232,6 @@ static bool dcn303_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; // Init the vm_helper if (dc->vm_helper) @@ -1400,8 +1398,7 @@ static bool dcn303_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, pool, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 55494730e500..2a7f47642a44 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -117,28 +117,6 @@ void dcn31_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - //Enable ability to power gate / don't force power on permanently - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(hws, true); - - return; - } - if (!dcb->funcs->is_accelerated_mode(dcb)) { hws->funcs.bios_golden_init(dc); if (hws->funcs.disable_vga) @@ -154,23 +132,21 @@ void dcn31_init_hw(struct dc *dc) res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (res_pool->dccg && res_pool->hubbub) { + if (res_pool->dccg && res_pool->hubbub) { - (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, - dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, - &res_pool->ref_clocks.dccg_ref_clock_inKhz); + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); - (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); - } else { - // Not all ASICs have DCCG sw component - res_pool->ref_clocks.dccg_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - res_pool->ref_clocks.dchub_ref_clock_inKhz = - res_pool->ref_clocks.xtalin_clock_inKhz; - } + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + // Not all ASICs have DCCG sw component + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; } } else ASSERT_CRITICAL(false); @@ -553,35 +529,31 @@ static void dcn31_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, NULL); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - link = pipe_ctx->stream->link; - /* DPMS may already disable or */ - /* dpms_off status is incorrect due to fastboot - * feature. When system resume from S4 with second - * screen only, the dpms_off would be true but - * VBIOS lit up eDP, so check link status too. - */ - if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) - dc->link_srv->set_dpms_off(pipe_ctx); - else if (pipe_ctx->stream_res.audio) - dc->hwss.disable_audio_stream(pipe_ctx); + link = pipe_ctx->stream->link; + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. 
When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); - /* free acquired resources */ - if (pipe_ctx->stream_res.audio) { - /*disable az_endpoint*/ - pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - /*free audio*/ - if (dc->caps.dynamic_audio == true) { - /*we have to dynamic arbitrate the audio endpoints*/ - /*we free the resource, need reset is_audio_acquired*/ - update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, - pipe_ctx->stream_res.audio, false); - pipe_ctx->stream_res.audio = NULL; - } + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } - } else if (pipe_ctx->stream_res.dsc) { - dc->link_srv->set_dsc_enable(pipe_ctx, false); } pipe_ctx->stream = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index ba9e7dee6e5e..fc25cc300a17 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -154,8 +154,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn31_funcs; dc->hwseq->funcs = dcn31_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index ff8cd5076434..9c637c895f91 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -1341,13 +1341,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. - * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1988,10 +1981,7 @@ static bool dcn31_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2195,9 +2185,8 @@ static bool dcn31_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
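/*
 * The DCN3.1-family hwseq constructors above also drop the FPGA-only
 * dp_hpo_and_otg_sequence workaround (enabling the HPO DP stream
 * encoder before setting OTG master enable); on silicon the default
 * enable_stream_timing ordering is kept.
 */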
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 90be62c05822..86d6a514dec0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -160,8 +160,4 @@ void dcn314_hw_sequencer_construct(struct dc *dc) dc->hwss = dcn314_funcs; dc->hwseq->funcs = dcn314_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index abeeede38fb3..2483d37e425d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -1375,13 +1375,6 @@ static struct dce_hwseq *dcn314_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. - * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -2101,8 +2094,7 @@ static bool dcn314_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) + &res_create_funcs)) goto create_fail; /* HW Sequencer and Plane caps */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 95fd3d087ea3..e6d87c162d26 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1342,13 +1342,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. - * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1947,10 +1940,7 @@ static bool dcn315_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2131,9 +2121,8 @@ static bool dcn315_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index 9ead347a33e9..a3be61cc541f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -1340,13 +1340,6 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; - /* DCN3.1 FPGA Workaround - * Need to enable HPO DP Stream Encoder before setting OTG master enable. - * To do so, move calling function enable_stream_timing to only be done AFTER calling - * function core_link_enable_stream - */ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1844,10 +1837,7 @@ static bool dcn316_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2028,9 +2018,8 @@ static bool dcn316_resource_construct( /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer and Plane caps */ dcn31_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 24a890d879b8..6f9a165c1eab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -161,8 +161,4 @@ void dcn32_hw_sequencer_init_functions(struct dc *dc) dc->hwss = dcn32_funcs; dc->hwseq->funcs = dcn32_private_funcs; - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwseq->funcs.init_pipes = NULL; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 98c394f9f8cf..33abc8c9d4be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -2260,10 +2260,7 @@ static bool dcn32_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2319,8 +2316,7 @@ static bool dcn32_resource_construct( } /* DML */ - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); + dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); /* IRQ Service */ init_data.ctx = dc->ctx; @@ -2457,9 +2453,8 @@ static bool dcn32_resource_construct( /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
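/*
 * DML setup follows the same rule: dcn32/dcn321 (and the dcn31x FPU
 * helpers later in this patch) now always initialize against the
 * silicon project, e.g.
 *
 *	dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip,
 *			DML_PROJECT_DCN32);
 *
 * so the DML_PROJECT_DCN31_FPGA fallback can never be selected.
 */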
- &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer init functions and Plane caps */ dcn32_hw_sequencer_init_functions(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index f4cd9749ffdf..af0bb3e94250 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -1801,10 +1801,7 @@ static bool dcn321_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { - dc->debug = debug_defaults_diags; - } else - dc->debug = debug_defaults_diags; + // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -1860,8 +1857,7 @@ static bool dcn321_resource_construct( } /* DML */ - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); + dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); /* IRQ Service */ init_data.ctx = dc->ctx; @@ -1993,9 +1989,8 @@ static bool dcn321_resource_construct( /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, - (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? - &res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + &res_create_funcs)) + goto create_fail; /* HW Sequencer init functions and Plane caps */ dcn32_hw_sequencer_init_functions(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c index 422f17aefd4a..6ce90678b33c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c @@ -333,45 +333,43 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param memcpy(s, dcn3_01_soc.clock_limits, sizeof(dcn3_01_soc.clock_limits)); /* Default clock levels are used for diags, which may lead to overclocking. 
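 * (With IS_DIAG_DC() gone, the guard below disappears and the
 * clock-limit table is always rebuilt from the SMU-provided
 * clk_table, so the default "diags" levels can no longer be selected
 * on silicon.)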
*/ - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; - dcn3_01_ip.max_num_dpp = pool->base.pipe_count; - dcn3_01_soc.num_chans = bw_params->num_channels; + dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; + dcn3_01_ip.max_num_dpp = pool->base.pipe_count; + dcn3_01_soc.num_chans = bw_params->num_channels; - ASSERT(clk_table->num_entries); - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } + ASSERT(clk_table->num_entries); + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; } - - s[i].state = i; - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; - - s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - s[i].dram_bw_per_chan_gbps = - dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; } - if (clk_table->num_entries) { - dcn3_01_soc.num_states = clk_table->num_entries; - /* duplicate last level */ - s[dcn3_01_soc.num_states] = - dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; - s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; - } + s[i].state = i; + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; + + s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + s[i].dram_bw_per_chan_gbps = + dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + + if (clk_table->num_entries) { + dcn3_01_soc.num_states = clk_table->num_entries; + /* duplicate last level */ + s[dcn3_01_soc.num_states] = + dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; + s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; } memcpy(dcn3_01_soc.clock_limits, s, sizeof(dcn3_01_soc.clock_limits)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 19d034341e64..deb6d162a2d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -582,6 +582,7 @@ void 
dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits; struct clk_limit_table *clk_table = &bw_params->clk_table; unsigned int i, closest_clk_lvl; + int max_dispclk_mhz = 0, max_dppclk_mhz = 0; int j; dc_assert_fp_enabled(); @@ -589,59 +590,55 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params memcpy(s, dcn3_1_soc.clock_limits, sizeof(dcn3_1_soc.clock_limits)); // Default clock levels are used for diags, which may lead to overclocking. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - int max_dispclk_mhz = 0, max_dppclk_mhz = 0; + dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; + dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_1_soc.num_chans = bw_params->num_channels; - dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; - dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count; - dcn3_1_soc.num_chans = bw_params->num_channels; + ASSERT(clk_table->num_entries); - ASSERT(clk_table->num_entries); + /* Prepass to find max clocks independent of voltage level. */ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } - /* Prepass to find max clocks independent of voltage level. */ - for (i = 0; i < clk_table->num_entries; ++i) { - if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; - if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; - } - - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; } - - s[i].state = i; - - /* Clocks dependent on voltage level. */ - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * - 2 * clk_table->entries[i].wck_ratio; - - /* Clocks independent of voltage level. */ - s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : - dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - - s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : - dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - - s[i].dram_bw_per_chan_gbps = - dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - } - if (clk_table->num_entries) { - dcn3_1_soc.num_states = clk_table->num_entries; } + + s[i].state = i; + + /* Clocks dependent on voltage level. 
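 * (dcfclk/fabricclk/socclk and the DRAM speed come straight from the
 * clk_table entry for this state; dram_speed_mts folds in the
 * wck_ratio reported with the DPM table.)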
*/ + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * + 2 * clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : + dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + s[i].dram_bw_per_chan_gbps = + dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + if (clk_table->num_entries) { + dcn3_1_soc.num_states = clk_table->num_entries; } memcpy(dcn3_1_soc.clock_limits, s, sizeof(dcn3_1_soc.clock_limits)); @@ -655,10 +652,7 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31); - else - dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31); } void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -719,10 +713,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315); - else - dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315); } void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -738,71 +729,68 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param memcpy(s, dcn3_16_soc.clock_limits, sizeof(dcn3_16_soc.clock_limits)); // Default clock levels are used for diags, which may lead to overclocking. - if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; + dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_16_soc.num_chans = bw_params->num_channels; - dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; - dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count; - dcn3_16_soc.num_chans = bw_params->num_channels; + ASSERT(clk_table->num_entries); - ASSERT(clk_table->num_entries); + /* Prepass to find max clocks independent of voltage level. */ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } - /* Prepass to find max clocks independent of voltage level. 
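 * (dispclk/dppclk are treated as voltage-independent: the prepass
 * scans every DPM entry for the maximum value, which is then applied
 * to each state, falling back to the closest bounding-box level when
 * the table reports zero.)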
*/ - for (i = 0; i < clk_table->num_entries; ++i) { - if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; - if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= + clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } + } + // Ported from DCN315 + if (clk_table->num_entries == 1) { + /*smu gives one DPM level, let's take the highest one*/ + closest_clk_lvl = dcn3_16_soc.num_states - 1; } - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= - clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } - } - // Ported from DCN315 - if (clk_table->num_entries == 1) { - /*smu gives one DPM level, let's take the highest one*/ - closest_clk_lvl = dcn3_16_soc.num_states - 1; - } + s[i].state = i; - s[i].state = i; - - /* Clocks dependent on voltage level. */ - s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - if (clk_table->num_entries == 1 && - s[i].dcfclk_mhz < - dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { - /*SMU fix not released yet*/ - s[i].dcfclk_mhz = - dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; - } - s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * - 2 * clk_table->entries[i].wck_ratio; - - /* Clocks independent of voltage level. */ - s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : - dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - - s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz : - dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - - s[i].dram_bw_per_chan_gbps = - dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - s[i].phyclk_d18_mhz = - dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - } - if (clk_table->num_entries) { - dcn3_16_soc.num_states = clk_table->num_entries; + /* Clocks dependent on voltage level. */ + s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + if (clk_table->num_entries == 1 && + s[i].dcfclk_mhz < + dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { + /*SMU fix not released yet*/ + s[i].dcfclk_mhz = + dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; } + s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + s[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * + 2 * clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz : + dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + s[i].dppclk_mhz = max_dppclk_mhz ? 
max_dppclk_mhz : + dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + s[i].dram_bw_per_chan_gbps = + dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + s[i].phyclk_d18_mhz = + dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + if (clk_table->num_entries) { + dcn3_16_soc.num_states = clk_table->num_entries; } memcpy(dcn3_16_soc.clock_limits, s, sizeof(dcn3_16_soc.clock_limits)); @@ -817,10 +805,7 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000; } - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31); - else - dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA); + dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31); } int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index fcde8f21b8be..4113ce79c4af 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -1432,14 +1432,6 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip); dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip); - // hack for FPGA - if (mode_lib->project == DML_PROJECT_DCN31_FPGA) { - if (disp_dlg_regs->vratio_prefetch >= (unsigned int) dml_pow(2, 22)) { - disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 22) - 1; - dml_print("vratio_prefetch exceed the max value, the register field is [21:0]\n"); - } - } - disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l); ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 554152371eb5..318b9c2bc9be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -190,8 +190,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dc_assert_fp_enabled(); // Default clock levels are used for diags, which may lead to overclocking. 
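/*
 * The deleted "hack for FPGA" in the DCN3.1/DCN3.14 DLG calculators
 * clamped vratio_prefetch to its 22-bit register field, roughly:
 *
 *	if (disp_dlg_regs->vratio_prefetch >= (unsigned int)dml_pow(2, 22))
 *		disp_dlg_regs->vratio_prefetch =
 *				(unsigned int)dml_pow(2, 22) - 1;
 *
 * With DML_PROJECT_DCN31_FPGA unreachable, the assumption is that
 * silicon-mode values fit in [21:0], so no defensive clamp is kept.
 */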
- if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) { - + if (dc->config.use_default_clock_table == false) { dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator; dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count; @@ -267,10 +266,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dcn20_patch_bounding_box(dc, &dcn3_14_soc); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314); - else - dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA); } static bool is_dual_plane(enum surface_pixel_format format) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index 61ba3e33bb11..b3e8dc08030c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -1520,14 +1520,6 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip); dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip); - // hack for FPGA - if (mode_lib->project == DML_PROJECT_DCN31_FPGA) { - if (disp_dlg_regs->vratio_prefetch >= (unsigned int) dml_pow(2, 22)) { - disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 22) - 1; - dml_print("vratio_prefetch exceed the max value, the register field is [21:0]\n"); - } - } - disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l); ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13)); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index d8d8fcd5ef1f..137ff970c9aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2530,80 +2530,78 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa { dc_assert_fp_enabled(); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* Overrides from dc->config options */ - dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + /* Overrides from dc->config options */ + dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - /* Override from passed dc->bb_overrides if available*/ - if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns - && dc->bb_overrides.sr_exit_time_ns) { - dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; - } - - if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000) - != dc->bb_overrides.sr_enter_plus_exit_time_ns - && dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dcn3_2_soc.sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } - - if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns - && dc->bb_overrides.urgent_latency_ns) { - dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - } - - if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000) - != dc->bb_overrides.dram_clock_change_latency_ns - && dc->bb_overrides.dram_clock_change_latency_ns) { - 
dcn3_2_soc.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } - - if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000) - != dc->bb_overrides.fclk_clock_change_latency_ns - && dc->bb_overrides.fclk_clock_change_latency_ns) { - dcn3_2_soc.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - } - - if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000) - != dc->bb_overrides.dummy_clock_change_latency_ns - && dc->bb_overrides.dummy_clock_change_latency_ns) { - dcn3_2_soc.dummy_pstate_latency_us = - dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; - } - - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; - - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_2_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_2_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_2_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } - - /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; - dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, - dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); - } - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* Override from passed dc->bb_overrides if available*/ + if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns + && dc->bb_overrides.sr_exit_time_ns) { + dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; } + if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000) + != dc->bb_overrides.sr_enter_plus_exit_time_ns + && dc->bb_overrides.sr_enter_plus_exit_time_ns) { + dcn3_2_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } + + if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns + && dc->bb_overrides.urgent_latency_ns) { + dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } + + if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000) + != dc->bb_overrides.dram_clock_change_latency_ns + && dc->bb_overrides.dram_clock_change_latency_ns) { + dcn3_2_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } + + if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000) + != dc->bb_overrides.fclk_clock_change_latency_ns + && dc->bb_overrides.fclk_clock_change_latency_ns) { + dcn3_2_soc.fclk_change_latency_us = + dc->bb_overrides.fclk_clock_change_latency_ns / 1000; + } + + if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000) + != dc->bb_overrides.dummy_clock_change_latency_ns + && dc->bb_overrides.dummy_clock_change_latency_ns) { + dcn3_2_soc.dummy_pstate_latency_us = + dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; + } + + /* Override from VBIOS if VBIOS bb_info available */ + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = {0}; + + if 
(dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_2_soc.dram_clock_change_latency_us = + bb_info.dram_clock_change_latency_100ns * 10; + + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_2_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; + + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_2_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; + } + } + + /* Override from VBIOS for num_chan */ + if (dc->ctx->dc_bios->vram_info.num_chans) { + dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + } + + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* DML DSC delay factor workaround */ dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; @@ -2614,7 +2612,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; /* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */ - if ((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) { + if (bw_params->clk_table.entries[0].memclk_mhz) { if (dc->debug.use_legacy_soc_bb_mechanism) { unsigned int i = 0, j = 0, num_states = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 342a1bcb4927..ffd7c3c1b142 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -471,80 +471,78 @@ static void dcn321_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) { dc_assert_fp_enabled(); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* Overrides from dc->config options */ - dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + /* Overrides from dc->config options */ + dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - /* Override from passed dc->bb_overrides if available*/ - if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns - && dc->bb_overrides.sr_exit_time_ns) { - dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; - } - - if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000) - != dc->bb_overrides.sr_enter_plus_exit_time_ns - && dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dcn3_21_soc.sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } - - if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns - && dc->bb_overrides.urgent_latency_ns) { - dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; - } - - if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000) - != dc->bb_overrides.dram_clock_change_latency_ns - && dc->bb_overrides.dram_clock_change_latency_ns) { - dcn3_21_soc.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } - - if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000) - 
!= dc->bb_overrides.fclk_clock_change_latency_ns - && dc->bb_overrides.fclk_clock_change_latency_ns) { - dcn3_21_soc.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - } - - if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000) - != dc->bb_overrides.dummy_clock_change_latency_ns - && dc->bb_overrides.dummy_clock_change_latency_ns) { - dcn3_21_soc.dummy_pstate_latency_us = - dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; - } - - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; - - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_21_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_21_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_21_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } - - /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; - dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, - dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); - } - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* Override from passed dc->bb_overrides if available*/ + if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns + && dc->bb_overrides.sr_exit_time_ns) { + dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; } + if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000) + != dc->bb_overrides.sr_enter_plus_exit_time_ns + && dc->bb_overrides.sr_enter_plus_exit_time_ns) { + dcn3_21_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } + + if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns + && dc->bb_overrides.urgent_latency_ns) { + dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } + + if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000) + != dc->bb_overrides.dram_clock_change_latency_ns + && dc->bb_overrides.dram_clock_change_latency_ns) { + dcn3_21_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } + + if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000) + != dc->bb_overrides.fclk_clock_change_latency_ns + && dc->bb_overrides.fclk_clock_change_latency_ns) { + dcn3_21_soc.fclk_change_latency_us = + dc->bb_overrides.fclk_clock_change_latency_ns / 1000; + } + + if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000) + != dc->bb_overrides.dummy_clock_change_latency_ns + && dc->bb_overrides.dummy_clock_change_latency_ns) { + dcn3_21_soc.dummy_pstate_latency_us = + dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0; + } + + /* Override from VBIOS if VBIOS bb_info available */ + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = {0}; + + if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + if (bb_info.dram_clock_change_latency_100ns > 0) 
+ dcn3_21_soc.dram_clock_change_latency_us = + bb_info.dram_clock_change_latency_100ns * 10; + + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_21_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; + + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_21_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; + } + } + + /* Override from VBIOS for num_chan */ + if (dc->ctx->dc_bios->vram_info.num_chans) { + dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + } + + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + /* DML DSC delay factor workaround */ dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; @@ -555,7 +553,6 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; /* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */ - if ((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) { if (dc->debug.use_legacy_soc_bb_mechanism) { unsigned int i = 0, j = 0, num_states = 0; @@ -699,6 +696,5 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); if (dc->current_state) dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); - } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index bdf3ac6cadd5..da0cfbb071e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -113,7 +113,6 @@ void dml_init_instance(struct display_mode_lib *lib, lib->funcs = dml30_funcs; break; case DML_PROJECT_DCN31: - case DML_PROJECT_DCN31_FPGA: case DML_PROJECT_DCN315: lib->funcs = dml31_funcs; break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index a9d49ef58fb5..5edf69fa40d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -41,7 +41,6 @@ enum dml_project { DML_PROJECT_DCN30, DML_PROJECT_DCN31, DML_PROJECT_DCN315, - DML_PROJECT_DCN31_FPGA, DML_PROJECT_DCN314, DML_PROJECT_DCN32, }; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index edd7d026a762..586fe25c1702 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -28,24 +28,6 @@ #include "dccg.h" #include "clk_mgr.h" -static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) -{ - switch (link->link_enc->transmitter) { - case TRANSMITTER_UNIPHY_A: - return PHYD32CLKA; - case TRANSMITTER_UNIPHY_B: - return PHYD32CLKB; - case TRANSMITTER_UNIPHY_C: - return PHYD32CLKC; - case TRANSMITTER_UNIPHY_D: - return PHYD32CLKD; - case TRANSMITTER_UNIPHY_E: - return PHYD32CLKE; - default: - return PHYD32CLKA; - } -} - static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 
throttled_vcp_size) { @@ -120,81 +102,26 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); } -static void enable_hpo_dp_fpga_link_output(struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal, - enum clock_source_id clock_source, - const struct dc_link_settings *link_settings) -{ - const struct dc *dc = link->dc; - enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(link); - int phyd32clk_freq_khz = link_settings->link_rate == LINK_RATE_UHBR10 ? 312500 : - link_settings->link_rate == LINK_RATE_UHBR13_5 ? 412875 : - link_settings->link_rate == LINK_RATE_UHBR20 ? 625000 : 0; - - dm_set_phyd32clk(dc->ctx, phyd32clk_freq_khz); - dc->res_pool->dccg->funcs->set_physymclk( - dc->res_pool->dccg, - link->link_enc_hw_inst, - PHYSYMCLK_FORCE_SRC_PHYD32CLK, - true); - dc->res_pool->dccg->funcs->enable_symclk32_le( - dc->res_pool->dccg, - link_res->hpo_dp_link_enc->inst, - phyd32clk); - link_res->hpo_dp_link_enc->funcs->link_enable( - link_res->hpo_dp_link_enc, - link_settings->lane_count); - -} - static void enable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) { - if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) - enable_hpo_dp_fpga_link_output(link, link_res, signal, - clock_source, link_settings); - else - link_res->hpo_dp_link_enc->funcs->enable_link_phy( - link_res->hpo_dp_link_enc, - link_settings, - link->link_enc->transmitter, - link->link_enc->hpd_source); -} - - -static void disable_hpo_dp_fpga_link_output(struct dc_link *link, - const struct link_resource *link_res, - enum signal_type signal) -{ - const struct dc *dc = link->dc; - - link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); - dc->res_pool->dccg->funcs->disable_symclk32_le( - dc->res_pool->dccg, - link_res->hpo_dp_link_enc->inst); - dc->res_pool->dccg->funcs->set_physymclk( - dc->res_pool->dccg, - link->link_enc_hw_inst, - PHYSYMCLK_FORCE_SRC_SYMCLK, - false); - dm_set_phyd32clk(dc->ctx, 0); + link_res->hpo_dp_link_enc->funcs->enable_link_phy( + link_res->hpo_dp_link_enc, + link_settings, + link->link_enc->transmitter, + link->link_enc->hpd_source); } static void disable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { - if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) { - disable_hpo_dp_fpga_link_output(link, link_res, signal); - } else { link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); link_res->hpo_dp_link_enc->funcs->disable_link_phy( link_res->hpo_dp_link_enc, signal); - } } static void set_hpo_dp_link_test_pattern(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 2267fb097830..2963267fe74a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -765,7 +765,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) struct dc_stream_state *stream = pipe_ctx->stream; bool result = false; - if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + if (dc_is_virtual_signal(stream->signal)) result = true; else result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); @@ -778,7 +778,6 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) void 
link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -816,8 +815,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) - && !dp_is_128b_132b_signal(pipe_ctx)) { + if (dc_is_dp_signal(stream->signal) && !dp_is_128b_132b_signal(pipe_ctx)) { DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -849,7 +847,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) false, NULL, true); - else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + else { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( pipe_ctx->stream_res.stream_enc, OPTC_DSC_DISABLED, 0, 0); @@ -2271,8 +2269,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) } } - if (!IS_DIAG_DC(dc->ctx->dce_environment) && - dc_is_virtual_signal(pipe_ctx->stream->signal)) + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) return; if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) { @@ -2358,6 +2355,8 @@ void link_set_dpms_on( enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + bool apply_edp_fast_boot_optimization = + pipe_ctx->stream->apply_edp_fast_boot_optimization; ASSERT(is_master_pipe_for_link(link, pipe_ctx)); @@ -2375,9 +2374,8 @@ void link_set_dpms_on( } } - if (!IS_DIAG_DC(dc->ctx->dce_environment) && - dc_is_virtual_signal(pipe_ctx->stream->signal)) - return; + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); @@ -2402,139 +2400,127 @@ void link_set_dpms_on( link_hwss->setup_stream_attribute(pipe_ctx); - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - bool apply_edp_fast_boot_optimization = - pipe_ctx->stream->apply_edp_fast_boot_optimization; + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; - pipe_ctx->stream->apply_edp_fast_boot_optimization = false; + // Enable VPG before building infoframe + if (vpg && vpg->funcs->vpg_poweron) + vpg->funcs->vpg_poweron(vpg); - // Enable VPG before building infoframe - if (vpg && vpg->funcs->vpg_poweron) - vpg->funcs->vpg_poweron(vpg); + resource_build_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); - resource_build_info_frame(pipe_ctx); - dc->hwss.update_info_frame(pipe_ctx); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + /* Do not touch link on seamless boot optimization. */ + if (pipe_ctx->stream->apply_seamless_boot_optimization) { + pipe_ctx->stream->dpms_off = false; - /* Do not touch link on seamless boot optimization. 
*/ - if (pipe_ctx->stream->apply_seamless_boot_optimization) { - pipe_ctx->stream->dpms_off = false; - - /* Still enable stream features & audio on seamless boot for DP external displays */ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { - enable_stream_features(pipe_ctx); - dc->hwss.enable_audio_stream(pipe_ctx); - } - - update_psp_stream_config(pipe_ctx, false); - return; - } - - /* eDP lit up by bios already, no need to enable again. */ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && - apply_edp_fast_boot_optimization && - !pipe_ctx->stream->timing.flags.DSC && - !pipe_ctx->next_odm_pipe) { - pipe_ctx->stream->dpms_off = false; - update_psp_stream_config(pipe_ctx, false); - return; - } - - if (pipe_ctx->stream->dpms_off) - return; - - /* Have to setup DSC before DIG FE and BE are connected (which happens before the - * link training). This is to make sure the bandwidth sent to DIG BE won't be - * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag - * will be automatically set at a later time when the video is enabled - * (DP_VID_STREAM_EN = 1). - */ - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - link_set_dsc_enable(pipe_ctx, true); - - } - - status = enable_link(state, pipe_ctx); - - if (status != DC_OK) { - DC_LOG_WARNING("enabling link %u failed: %d\n", - pipe_ctx->stream->link->link_index, - status); - - /* Abort stream enable *unless* the failure was due to - * DP link training - some DP monitors will recover and - * show the stream anyway. But MST displays can't proceed - * without link training. - */ - if (status != DC_FAIL_DP_LINK_TRAINING || - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - if (false == stream->link->link_status.link_active) - disable_link(stream->link, &pipe_ctx->link_res, - pipe_ctx->stream->signal); - BREAK_TO_DEBUGGER(); - return; - } - } - - /* turn off otg test pattern if enable */ - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) - pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - COLOR_DEPTH_UNDEFINED); - - /* This second call is needed to reconfigure the DIG - * as a workaround for the incorrect value being applied - * from transmitter control. 
- */ - if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || - dp_is_128b_132b_signal(pipe_ctx))) { - if (link_enc) - link_enc->funcs->setup( - link_enc, - pipe_ctx->stream->signal); - } - - dc->hwss.enable_stream(pipe_ctx); - - /* Set DPS PPS SDP (AKA "info frames") */ - if (pipe_ctx->stream->timing.flags.DSC) { - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) { - dp_set_dsc_on_rx(pipe_ctx, true); - link_set_dsc_pps_packet(pipe_ctx, true, true); - } - } - - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - allocate_mst_payload(pipe_ctx); - else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && - dp_is_128b_132b_signal(pipe_ctx)) - update_sst_payload(pipe_ctx, true); - - dc->hwss.unblank_stream(pipe_ctx, - &pipe_ctx->stream->link->cur_link_settings); - - if (stream->sink_patches.delay_ignore_msa > 0) - msleep(stream->sink_patches.delay_ignore_msa); - - if (dc_is_dp_signal(pipe_ctx->stream->signal)) + /* Still enable stream features & audio on seamless boot for DP external displays */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { enable_stream_features(pipe_ctx); + dc->hwss.enable_audio_stream(pipe_ctx); + } + update_psp_stream_config(pipe_ctx, false); - - dc->hwss.enable_audio_stream(pipe_ctx); - - } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - if (dp_is_128b_132b_signal(pipe_ctx)) - dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx); - if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - link_set_dsc_enable(pipe_ctx, true); + return; } + /* eDP lit up by bios already, no need to enable again. */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + apply_edp_fast_boot_optimization && + !pipe_ctx->stream->timing.flags.DSC && + !pipe_ctx->next_odm_pipe) { + pipe_ctx->stream->dpms_off = false; + update_psp_stream_config(pipe_ctx, false); + return; + } + + if (pipe_ctx->stream->dpms_off) + return; + + /* Have to setup DSC before DIG FE and BE are connected (which happens before the + * link training). This is to make sure the bandwidth sent to DIG BE won't be + * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag + * will be automatically set at a later time when the video is enabled + * (DP_VID_STREAM_EN = 1). + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) + link_set_dsc_enable(pipe_ctx, true); + + } + + status = enable_link(state, pipe_ctx); + + if (status != DC_OK) { + DC_LOG_WARNING("enabling link %u failed: %d\n", + pipe_ctx->stream->link->link_index, + status); + + /* Abort stream enable *unless* the failure was due to + * DP link training - some DP monitors will recover and + * show the stream anyway. But MST displays can't proceed + * without link training. 
+ */ + if (status != DC_FAIL_DP_LINK_TRAINING || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, &pipe_ctx->link_res, + pipe_ctx->stream->signal); + BREAK_TO_DEBUGGER(); + return; + } + } + + /* turn off otg test pattern if enable */ + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + COLOR_DEPTH_UNDEFINED); + + /* This second call is needed to reconfigure the DIG + * as a workaround for the incorrect value being applied + * from transmitter control. + */ + if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || + dp_is_128b_132b_signal(pipe_ctx))) { + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); + } + + dc->hwss.enable_stream(pipe_ctx); + + /* Set DPS PPS SDP (AKA "info frames") */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) { + dp_set_dsc_on_rx(pipe_ctx, true); + link_set_dsc_pps_packet(pipe_ctx, true, true); + } + } + + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + allocate_mst_payload(pipe_ctx); + else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + dp_is_128b_132b_signal(pipe_ctx)) + update_sst_payload(pipe_ctx, true); + + dc->hwss.unblank_stream(pipe_ctx, + &pipe_ctx->stream->link->cur_link_settings); + + if (stream->sink_patches.delay_ignore_msa > 0) + msleep(stream->sink_patches.delay_ignore_msa); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + enable_stream_features(pipe_ctx); + update_psp_stream_config(pipe_ctx, false); + + dc->hwss.enable_audio_stream(pipe_ctx); + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { set_avmute(pipe_ctx, false); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index f46864630506..3a5e80b57711 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -326,8 +326,7 @@ bool dp_is_fec_supported(const struct dc_link *link) return (dc_is_dp_signal(link->connector_signal) && link_enc && link_enc->features.fec_supported && - link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && - !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE); } bool dp_should_enable_fec(const struct dc_link *link) From 268182606f26434c5d3ebd0e86efcb0418dec487 Mon Sep 17 00:00:00 2001 From: Cruise Hung Date: Fri, 12 May 2023 23:33:46 +0800 Subject: [PATCH 627/861] drm/amd/display: Update correct DCN314 register header [Why] The register header for DCN314 is not correct. [How] Update correct DCN314 register header. 
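
For readers unfamiliar with the pattern used by the new dmub_dcn314.c in this patch: the register and field tables are filled in by redefining a consumer macro (DMUB_SR/DMUB_SF) and then expanding a shared list macro, i.e. the classic X-macro technique. A minimal, self-contained sketch of that technique follows; the names (DEMO_REG_LIST, demo_offsets) are made up for illustration and are not the actual DMUB macros:

#include <stdint.h>
#include <stdio.h>

/* One shared register list; every consumer expands it with its own REG(). */
#define DEMO_REG_LIST() \
	REG(CTRL,   0x10)   \
	REG(STATUS, 0x14)

/* Expansion 1: an enum of indices derived from the list. */
#define REG(name, off) DEMO_REG_##name,
enum { DEMO_REG_LIST() DEMO_REG_COUNT };
#undef REG

/* Expansion 2: the offset table, in the same order as the enum. */
#define REG(name, off) off,
static const uint32_t demo_offsets[DEMO_REG_COUNT] = { DEMO_REG_LIST() };
#undef REG

int main(void)
{
	printf("CTRL lives at offset 0x%x\n", (unsigned)demo_offsets[DEMO_REG_CTRL]);
	return 0;
}

Keeping a single list with several expansions is what lets each DCN revision supply only its own base offsets (here, the DCN314 segment bases) while reusing the shared DMUB_DCN31_REGS()/DMUB_DCN31_FIELDS() lists.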
Reviewed-by: Nicholas Kazlauskas Acked-by: Tom Chung Signed-off-by: Cruise Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/src/Makefile | 2 +- .../drm/amd/display/dmub/src/dmub_dcn314.c | 62 +++++++++++++++++++ .../drm/amd/display/dmub/src/dmub_dcn314.h | 33 ++++++++++ .../gpu/drm/amd/display/dmub/src/dmub_srv.c | 5 +- 4 files changed, 100 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c create mode 100644 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile index 0589ad4778ee..caf095aca8f3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/Makefile +++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile @@ -22,7 +22,7 @@ DMUB = dmub_srv.o dmub_srv_stat.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o -DMUB += dmub_dcn31.o dmub_dcn315.o dmub_dcn316.o +DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o DMUB += dmub_dcn32.o AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB)) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c new file mode 100644 index 000000000000..48a06dbd9be7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c @@ -0,0 +1,62 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "../dmub_srv.h" +#include "dmub_reg.h" +#include "dmub_dcn314.h" + +#include "dcn/dcn_3_1_4_offset.h" +#include "dcn/dcn_3_1_4_sh_mask.h" + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg +#define CTX dmub +#define REGS dmub->regs_dcn31 +#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name) + +/* Registers. 
*/
+
+const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = {
+#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
+	{
+		DMUB_DCN31_REGS()
+		DMCUB_INTERNAL_REGS()
+	},
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+	{ DMUB_DCN31_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+	{ DMUB_DCN31_FIELDS() },
+#undef DMUB_SF
+};
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
new file mode 100644
index 000000000000..674267a2940e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DCN314_H_
+#define _DMUB_DCN314_H_
+
+#include "dmub_dcn31.h"
+
+extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs;
+
+#endif /* _DMUB_DCN314_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ea3bed70a229..9e9a6a44a7ac 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -32,6 +32,7 @@
 #include "dmub_dcn302.h"
 #include "dmub_dcn303.h"
 #include "dmub_dcn31.h"
+#include "dmub_dcn314.h"
 #include "dmub_dcn315.h"
 #include "dmub_dcn316.h"
 #include "dmub_dcn32.h"
@@ -225,7 +226,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
 	case DMUB_ASIC_DCN314:
 	case DMUB_ASIC_DCN315:
 	case DMUB_ASIC_DCN316:
-		if (asic == DMUB_ASIC_DCN315)
+		if (asic == DMUB_ASIC_DCN314)
+			dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
+		else if (asic == DMUB_ASIC_DCN315)
 			dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
 		else if (asic == DMUB_ASIC_DCN316)
 			dmub->regs_dcn31 = &dmub_srv_dcn316_regs;

From 2da3556c8650798606c0d3f2288b2f87c6665a69 Mon Sep 17 00:00:00 2001
From: Saaem Rizvi
Date: Thu, 11 May 2023 15:16:35 -0400
Subject: [PATCH 628/861] drm/amd/display: Trigger DIO FIFO resync on commit
 streams for DCN32

[WHY and HOW]
Currently, on DCN32 we have an old workaround to resolve a DIO FIFO
speed issue when writing to the OTG DIVIDER register. However, this
workaround is not safe, as the DIO FIFO rampup logic should be applied
while the OTGs are disabled along with the encoders. The new workaround
accounts for this. If the workaround sequence is incorrect, as it
previously was, there is a chance we might hang. This new workaround
was first implemented in DCN314.
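
The shape of the new sequence, as a stand-alone sketch (the helpers below are simplified stand-ins for dcn32_resync_fifo_dccg_dio() and dccg32_trigger_dio_fifo_resync() in the diff that follows; the pipe states are simulated, not read from real hardware):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

/* Simulated per-pipe "stream is off" state; the driver derives this from
 * dc->current_state (dpms_off or a virtual signal). */
static const bool pipe_stream_off[MAX_PIPES] = {
	false, true, false, true, true, true
};

static void disable_otg(int i) { printf("OTG%d disabled\n", i); }
static void enable_otg(int i)  { printf("OTG%d re-enabled\n", i); }

/* Stand-in for the DCCG hook: writing DENTIST_DISPCLK_WDIVIDER with the
 * value read back from DENTIST_DISPCLK_RDIVIDER re-ramps the DIO FIFO
 * without changing the effective clock. */
static void trigger_dio_fifo_resync(void)
{
	printf("WDIVIDER <- RDIVIDER (DIO FIFO rampup)\n");
}

int main(void)
{
	bool otg_disabled[MAX_PIPES] = { false };
	int i;

	/* 1. Disable only the OTGs whose stream is already off. */
	for (i = 0; i < MAX_PIPES; i++) {
		if (pipe_stream_off[i]) {
			disable_otg(i);
			otg_disabled[i] = true;
		}
	}

	/* 2. Trigger the resync while those OTGs are down. */
	trigger_dio_fifo_resync();

	/* 3. Re-enable exactly the OTGs disabled in step 1. */
	for (i = 0; i < MAX_PIPES; i++)
		if (otg_disabled[i])
			enable_otg(i);
	return 0;
}

The ordering is the point of the fix: the divider rewrite only ramps the FIFO safely while the affected OTGs and encoders are down, which the old wait-on-DENTIST_DISPCLK_CHG_DONE workaround did not guarantee.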
Reviewed-by: Alvin Lee Acked-by: Tom Chung Signed-off-by: Saaem Rizvi Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn314/dcn314_hwseq.c | 2 ++ .../gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 21 ++++---------- .../gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h | 5 ++-- .../drm/amd/display/dc/dcn32/dcn32_hwseq.c | 29 +++++++++++++++++++ .../drm/amd/display/dc/dcn32/dcn32_hwseq.h | 2 ++ .../gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 1 + 6 files changed, 42 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 70fac2ebb757..46b6f4f9e1fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -412,6 +412,8 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg); for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (otg_disabled[i]) pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index ffbb739d85b6..11e28e056cf7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -42,18 +42,14 @@ #define DC_LOGGER \ dccg->ctx->logger -/* This function is a workaround for writing to OTG_PIXEL_RATE_DIV - * without the probability of causing a DIG FIFO error. - */ -static void dccg32_wait_for_dentist_change_done( +static void dccg32_trigger_dio_fifo_resync( struct dccg *dccg) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + uint32_t dispclk_rdivider_value = 0; - uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL); - - REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value); - REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000); + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } static void dccg32_get_pixel_rate_div( @@ -124,29 +120,21 @@ static void dccg32_set_pixel_rate_div( REG_UPDATE_2(OTG_PIXEL_RATE_DIV, OTG0_PIXEL_RATE_DIVK1, k1, OTG0_PIXEL_RATE_DIVK2, k2); - - dccg32_wait_for_dentist_change_done(dccg); break; case 1: REG_UPDATE_2(OTG_PIXEL_RATE_DIV, OTG1_PIXEL_RATE_DIVK1, k1, OTG1_PIXEL_RATE_DIVK2, k2); - - dccg32_wait_for_dentist_change_done(dccg); break; case 2: REG_UPDATE_2(OTG_PIXEL_RATE_DIV, OTG2_PIXEL_RATE_DIVK1, k1, OTG2_PIXEL_RATE_DIVK2, k2); - - dccg32_wait_for_dentist_change_done(dccg); break; case 3: REG_UPDATE_2(OTG_PIXEL_RATE_DIV, OTG3_PIXEL_RATE_DIVK1, k1, OTG3_PIXEL_RATE_DIVK2, k2); - - dccg32_wait_for_dentist_change_done(dccg); break; default: BREAK_TO_DEBUGGER(); @@ -352,6 +340,7 @@ static const struct dccg_funcs dccg32_funcs = { .otg_add_pixel = dccg32_otg_add_pixel, .otg_drop_pixel = dccg32_otg_drop_pixel, .set_pixel_rate_div = dccg32_set_pixel_rate_div, + .trigger_dio_fifo_resync = dccg32_trigger_dio_fifo_resync, }; struct dccg *dccg32_create( diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h index 8071ab98d708..cf5508718122 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h @@ -112,8 +112,9 @@ DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\ 
 DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
 DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
- DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
-
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh)

 struct dccg *dccg32_create(
 	struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 2de910e0ce75..7f5cd8c8d49b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -1175,6 +1175,35 @@ void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
 			pix_per_cycle);
 }
 
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+{
+	uint8_t i;
+	struct pipe_ctx *pipe = NULL;
+	bool otg_disabled[MAX_PIPES] = {false};
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (pipe->top_pipe || pipe->prev_odm_pipe)
+			continue;
+
+		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+			pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
+			reset_sync_context_for_pipe(dc, context, i);
+			otg_disabled[i] = true;
+		}
+	}
+
+	hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (otg_disabled[i])
+			pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+	}
+}
+
 void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
 		struct dc_link_settings *link_settings)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 6694c1d14aa3..6dbe929cf599 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -75,6 +75,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
 
 void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
 
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
+
 void dcn32_subvp_pipe_control_lock(struct dc *dc,
 		struct dc_state *context,
 		bool lock,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 6f9a165c1eab..8356b31e1d9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -153,6 +153,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
 	.update_mall_sel = dcn32_update_mall_sel,
 	.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
 	.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
+	.resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
 	.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
 };

From 5b466b28fa943aa9441cd27a9a469e1330814299 Mon Sep 17 00:00:00 2001
From: Rodrigo Siqueira
Date: Tue, 9 May 2023 13:59:36 -0600
Subject: [PATCH 629/861] drm/amd/display: Reorganize DCN30 Makefile

DCN30 has a lot of files in the Makefile, and adding each one next to
the other makes it hard to read and can increase the chance of merge
conflicts. This commit just reorganizes the Makefile to put each file
associated with DCN30 on its own line.
Reviewed-by: Chris Park Acked-by: Tom Chung Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 38 +++++++++++++------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index b7c2ae9ddfda..4a3e9e47b6b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -1,16 +1,16 @@ -# +# # Copyright 2020 Advanced Micro Devices, Inc. -# +# # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: -# +# # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. -# +# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL @@ -18,17 +18,31 @@ # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. -# +# # Authors: AMD -# -# +# +# + +DCN30 := \ + dcn30_init.o \ + dcn30_hubbub.o \ + dcn30_hubp.o \ + dcn30_dpp.o \ + dcn30_optc.o \ + dcn30_dccg.o \ + dcn30_hwseq.o \ + dcn30_mpc.o dcn30_vpg.o \ + dcn30_afmt.o \ + dcn30_dio_stream_encoder.o \ + dcn30_dwb.o \ + dcn30_dpp_cm.o \ + dcn30_dwb_cm.o \ + dcn30_cm_common.o \ + dcn30_mmhubbub.o \ + dcn30_resource.o \ + dcn30_dio_link_encoder.o -DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \ - dcn30_dccg.o dcn30_hwseq.o dcn30_mpc.o dcn30_vpg.o \ - dcn30_afmt.o dcn30_dio_stream_encoder.o dcn30_dwb.o \ - dcn30_dpp_cm.o dcn30_dwb_cm.o dcn30_cm_common.o dcn30_mmhubbub.o \ - dcn30_dio_link_encoder.o dcn30_resource.o AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30)) From 257e9891db0b961b79c9f0ca50c808a738000e70 Mon Sep 17 00:00:00 2001 From: Josip Pavic Date: Fri, 12 May 2023 09:56:05 -0400 Subject: [PATCH 630/861] drm/amd/display: cache trace buffer size [Why & How] Cache the trace buffer size retrieved from DMUB FW in the driver Reviewed-by: Nicholas Kazlauskas Reviewed-by: Aric Cyr Acked-by: Tom Chung Signed-off-by: Josip Pavic Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1 + drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 7c9a2b34bd05..e210cb082ebd 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -428,6 +428,7 @@ struct dmub_srv { enum dmub_asic asic; void *user_ctx; uint32_t fw_version; + uint32_t trace_buffer_size; bool is_virtual; struct dmub_fb scratch_mem_fb; volatile const struct dmub_fw_state *fw_state; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 9e9a6a44a7ac..8b9af18e84fe 100644 --- 
a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -427,6 +427,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
 		dmub->fw_version = fw_info->fw_version;
 	}
 
+	dmub->trace_buffer_size = trace_buffer_size;
+
 	trace_buff->base = dmub_align(mail->top, 256);
 	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);

From 0d1ff99a3398ad4b7165ecd8e69d360090b32250 Mon Sep 17 00:00:00 2001
From: Aric Cyr
Date: Mon, 15 May 2023 00:14:00 -0400
Subject: [PATCH 631/861] drm/amd/display: 3.2.237

This version brings along the following fixes:
- Improve the message printed
- Disable dcn315 pixel rate crb when scaling
- Update SR watermarks for DCN314
- Fix dcn315 pixel rate crb scaling check
- Reset CRTC State Before Restore from S2idle
- Have Payload Properly Created After Resume
- Trigger DIO FIFO resync on commit streams
- Revert vblank change that causes null pointer crash
- Fix possible underflow for displays with large vblank
- Apply 60us prefetch for DCFCLK <= 300Mhz
- Update correct DCN314 register header

Acked-by: Tom Chung
Signed-off-by: Aric Cyr
Tested-by: Daniel Wheeler
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7fc087f85d39..7fd9f5a9e191 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.236"
+#define DC_VER "3.2.237"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6

From 40e39d72277fc014e7b8149def35831998c8df2f Mon Sep 17 00:00:00 2001
From: Srinivasan Shanmugam
Date: Fri, 19 May 2023 17:48:12 +0530
Subject: [PATCH 632/861] drm/amdgpu: Fix unused amdgpu_acpi_get_numa_info
 function in amdgpu_acpi_get_node_id()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix the following compiler error:

drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c:860:33: error: unused function 'amdgpu_acpi_get_numa_info' [-Werror,-Wunused-function]
static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm)
                                ^
1 error generated.

Fix it by guarding the amdgpu_acpi_get_numa_info and
amdgpu_acpi_get_numa_size functions so that they are only compiled when
CONFIG_ACPI_NUMA is enabled.
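
The fix below follows the usual pattern for helpers whose only callers are conditionally compiled: place the definitions under the same preprocessor guard as the call sites. A minimal sketch, with CONFIG_DEMO_NUMA as a made-up stand-in for CONFIG_ACPI_NUMA:

#include <stdio.h>

/* Normally supplied by the build system, e.g. -DCONFIG_DEMO_NUMA. */
/* #define CONFIG_DEMO_NUMA 1 */

#ifdef CONFIG_DEMO_NUMA
/* Compiled only when the feature is enabled, so -Wunused-function
 * (fatal under -Werror) cannot fire when the caller below is
 * compiled out. */
static int demo_get_numa_info(void)
{
	return 1;
}
#endif

int main(void)
{
#ifdef CONFIG_DEMO_NUMA
	printf("numa info: %d\n", demo_get_numa_info());
#else
	printf("NUMA support compiled out\n");
#endif
	return 0;
}

With the guard in place the function either has a caller or does not exist, so the warning cannot trigger in either configuration.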
Suggested-by: Lijo Lazar Cc: Christian König Cc: Lijo Lazar Cc: Luben Tuikov Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 873532c4adbe..02a66844e73e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -841,6 +841,7 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta return r; } +#ifdef CONFIG_ACPI_NUMA static inline uint64_t amdgpu_acpi_get_numa_size(int nid) { /* This is directly using si_meminfo_node implementation as the @@ -886,6 +887,7 @@ static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm) return numa_info; } +#endif /** * amdgpu_acpi_get_node_id - obtain the NUMA node id for corresponding amdgpu From 1893549af62135c788a66e7ff27c81459c532fb2 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 19 May 2023 10:25:28 +0530 Subject: [PATCH 633/861] drm/amdgpu: Fix uninitialized variable in gfxhub_v1_2_xcp_resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c:657:6: error: variable 'ret' is used uninitialized whenever 'if' condition is false [-Werror,-Wsometimes-uninitialized] if (!amdgpu_sriov_vf(adev)) ^~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c:660:9: note: uninitialized use occurs here return ret; ^~~ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c:657:2: note: remove the 'if' if its condition is always true if (!amdgpu_sriov_vf(adev)) ^~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c:648:9: note: initialize the variable 'ret' to silence this warning int ret; ^ = 0 1 error generated. Cc: Luben Tuikov Cc: Alex Deucher Cc: Christian König Cc: Lijo Lazar Cc: Hawking Zhang Signed-off-by: Srinivasan Shanmugam Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 8901e73fd700..4dabf910334b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -645,7 +645,6 @@ static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool value; - int ret; if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; @@ -655,9 +654,9 @@ static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask) gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, inst_mask); if (!amdgpu_sriov_vf(adev)) - ret = gfxhub_v1_2_xcc_gart_enable(adev, inst_mask); + return gfxhub_v1_2_xcc_gart_enable(adev, inst_mask); - return ret; + return 0; } static int gfxhub_v1_2_xcp_suspend(void *handle, uint32_t inst_mask) From 332bb09352a69b8e7cf0825575f90581d3695135 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Fri, 19 May 2023 11:34:04 +0800 Subject: [PATCH 634/861] drm/amdgpu: remove unused definition mmhub_v1_8_mmea_cgtt_clk_cntl_reg is defined but not used. 
Reported-by: kernel test robot
Signed-off-by: Tao Zhou
Acked-by: Srinivasan Shanmugam
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 8600e42434e7..bf3b0f382c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -757,14 +757,6 @@ static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev)
 		mmhub_v1_8_inst_query_ras_err_status(adev, i);
 }
 
-static const uint32_t mmhub_v1_8_mmea_cgtt_clk_cntl_reg[] = {
-	regMMEA0_CGTT_CLK_CTRL,
-	regMMEA1_CGTT_CLK_CTRL,
-	regMMEA2_CGTT_CLK_CTRL,
-	regMMEA3_CGTT_CLK_CTRL,
-	regMMEA4_CGTT_CLK_CTRL,
-};
-
 static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev,
 					uint32_t mmhub_inst)
 {

From 89fb3020d68d46807e1341ad8acae53cdf197234 Mon Sep 17 00:00:00 2001
From: Shiwu Zhang
Date: Tue, 16 May 2023 09:55:38 +0800
Subject: [PATCH 635/861] drm/amdgpu: init the XCC_DOORBELL_FENCE regs

Add the init_registers callback for nbio_v7_9.

Signed-off-by: Shiwu Zhang
Reviewed-by: Hawking Zhang
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 17 +++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 755259e96bbc..ad70086de9b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -424,6 +424,22 @@ nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev, u32 *supp_modes)
 	return ffs(tmp);
 }
 
+static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
+{
+	u32 inst_mask;
+	int i;
+
+	WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
+		0xff & ~(adev->gfx.xcc_mask));
+
+	inst_mask = adev->aid_mask & ~1U;
+	for_each_inst(i, inst_mask) {
+		WREG32_SOC15_EXT(NBIO, i, regXCC_DOORBELL_FENCE, i,
+			XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK);
+
+	}
+}
+
 const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
 	.get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
@@ -447,4 +463,5 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
 	.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
 	.set_compute_partition_mode = nbio_v7_9_set_compute_partition_mode,
 	.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+	.init_registers = nbio_v7_9_init_registers,
 };

From a64f7eb026ea7205bf9dfadabd746e47c5717b5d Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 18 May 2023 15:52:08 +0200
Subject: [PATCH 636/861] drm/radeon: stop including swiotlb.h

radeon does not need swiotlb.h, so stop including it.
Signed-off-by: Christoph Hellwig Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_ttm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 06a53ecc04a2..10794be30239 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -36,7 +36,6 @@ #include #include #include -#include <linux/swiotlb.h> #include #include From 1becc57cd1a905e2aa0e1eca60d2a37744525c4a Mon Sep 17 00:00:00 2001 From: Nikita Zhandarovich Date: Fri, 19 May 2023 08:33:27 -0700 Subject: [PATCH 637/861] drm/radeon: fix possible division-by-zero errors Function rv740_get_decoded_reference_divider() may return 0 due to an unpredictable reference divider value calculated in radeon_atom_get_clock_dividers(). This will lead to a division-by-zero error once that value is used as a divider in calculating 'clk_s'. While unlikely, this issue should nonetheless be prevented, so add a sanity check for such cases by testing the 'decoded_ref' value against 0. Found by Linux Verification Center (linuxtesting.org) with static analysis tool SVACE. v2: minor coding style fixes (Alex) In practice this should not actually happen, as the vbios should be properly populated. Fixes: 66229b200598 ("drm/radeon/kms: add dpm support for rv7xx (v4)") Signed-off-by: Nikita Zhandarovich Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cypress_dpm.c | 8 ++++++-- drivers/gpu/drm/radeon/ni_dpm.c | 8 ++++++-- drivers/gpu/drm/radeon/rv740_dpm.c | 8 ++++++-- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index fdddbbaecbb7..72a0768df00f 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -557,8 +557,12 @@ static int cypress_populate_mclk_value(struct radeon_device *rdev, ASIC_INTERNAL_MEMORY_SS, vco_freq)) { u32 reference_clock = rdev->clock.mpll.reference_freq; u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div); - u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); - u32 clk_v = ss.percentage * + u32 clk_s, clk_v; + + if (!decoded_ref) + return -EINVAL; + clk_s = reference_clock * 5 / (decoded_ref * ss.rate); + clk_v = ss.percentage * (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625); mpll_ss1 &= ~CLKV_MASK; diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 672d2239293e..3e1c1a392fb7 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2241,8 +2241,12 @@ static int ni_populate_mclk_value(struct radeon_device *rdev, ASIC_INTERNAL_MEMORY_SS, vco_freq)) { u32 reference_clock = rdev->clock.mpll.reference_freq; u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div); - u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); - u32 clk_v = ss.percentage * + u32 clk_s, clk_v; + + if (!decoded_ref) + return -EINVAL; + clk_s = reference_clock * 5 / (decoded_ref * ss.rate); + clk_v = ss.percentage * (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625); mpll_ss1 &= ~CLKV_MASK; diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c index d57a3e1df8d6..4464fd21a302 100644 --- a/drivers/gpu/drm/radeon/rv740_dpm.c +++ b/drivers/gpu/drm/radeon/rv740_dpm.c @@ -249,8 +249,12 @@ int rv740_populate_mclk_value(struct radeon_device *rdev, ASIC_INTERNAL_MEMORY_SS, vco_freq)) { u32 reference_clock = rdev->clock.mpll.reference_freq; u32 decoded_ref = 
rv740_get_decoded_reference_divider(dividers.ref_div); - u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); - u32 clk_v = 0x40000 * ss.percentage * + u32 clk_s, clk_v; + + if (!decoded_ref) + return -EINVAL; + clk_s = reference_clock * 5 / (decoded_ref * ss.rate); + clk_v = 0x40000 * ss.percentage * (dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000); mpll_ss1 &= ~CLKV_MASK; From d4281b49c1b6afc66c470c85019fc1eceb676a78 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 19 May 2023 14:20:17 +0800 Subject: [PATCH 638/861] drm/amd/pm: add missing NotifyPowerSource message mapping for SMU13.0.7 Otherwise, power source switching will fail because the message is unavailable. Fixes: bf4823267a81 ("drm/amd/pm: fix possible power mode mismatch between driver and PMFW") Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 1b6116cf8b4c..cf6827179fd1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -125,6 +125,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { From a3ffabb25077059427434368a1c65c176a0f93d0 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 19 Apr 2023 16:02:44 +0530 Subject: [PATCH 639/861] drm/amdgpu: Disable interrupt tracker on NBIOv7.9 Enabling the nBIF interrupt history tracker prevents LCLK deep sleep, hence disable it. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 2 ++ drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index ad70086de9b5..e082f6343d20 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -432,6 +432,8 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev) WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE, 0xff & ~(adev->gfx.xcc_mask)); + WREG32_SOC15(NBIO, 0, regBIFC_GFX_INT_MONITOR_MASK, 0x7ff); + inst_mask = adev->aid_mask & ~1U; for_each_inst(i, inst_mask) { WREG32_SOC15_EXT(NBIO, i, regXCC_DOORBELL_FENCE, i, diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h index 033f2796c1e3..c8a15c8f4822 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h @@ -6201,6 +6201,8 @@ #define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL2_BASE_IDX 8 #define regBIFC_BME_ERR_LOG_HB 0xe8ab #define regBIFC_BME_ERR_LOG_HB_BASE_IDX 8 +#define regBIFC_GFX_INT_MONITOR_MASK 0xe8ad +#define regBIFC_GFX_INT_MONITOR_MASK_BASE_IDX 8 #define regBIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC 0xe8c0 #define regBIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC_BASE_IDX 8 #define regBIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC 0xe8c1 From ab22ecabe99922db4bec8e2b439336f865bbc117 Mon Sep 17 00:00:00 2001 From: Jonatas Esteves Date: Sat, 20 May
2023 10:39:52 -0300 Subject: [PATCH 640/861] drm/amd/pm: Fix output of pp_od_clk_voltage Printing the other clock types should not be conditioned on being able to print OD_SCLK. Some GPUs currently have limited capability of only printing a subset of these. Since this condition was introduced in v5.18-rc1, reading from `pp_od_clk_voltage` has been returning empty on the Asus ROG Strix G15 (2021). Fixes: 79c65f3fcbb1 ("drm/amd/pm: do not expose power implementation details to amdgpu_pm.c") Reviewed-by: Evan Quan Signed-off-by: Jonatas Esteves Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 40100b77b2d9..da0da03569e8 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -871,13 +871,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, } if (ret == -ENOENT) { size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); - if (size > 0) { - size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size); - } + size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size); } if (size == 0) From 9f77af014cbc3b77a2f5b8cbce8262ff97e94aa7 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 22 May 2023 10:02:52 +0100 Subject: [PATCH 641/861] drm/amdgpu: Fix a couple of spelling mistakes in info and debug messages There are a couple of spelling mistakes, one in a dev_info message and the other in a dev_debug message. Fix them. 
Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 59b8b26e2caf..789cc16e1be7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1007,7 +1007,7 @@ int psp_spatial_partition(struct psp_context *psp, int mode) cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART; cmd->cmd.cmd_spatial_part.mode = mode; - dev_info(psp->adev->dev, "Requesting %d paritions through PSP", mode); + dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); release_psp_cmd_buf(psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index daeb6bcc9245..e9586a0dc335 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -359,7 +359,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, file_priv->minor->index, i); return -ENOENT; } - dev_dbg(adev->dev, "renderD%d partition %d openned!", + dev_dbg(adev->dev, "renderD%d partition %d opened!", file_priv->minor->index, i); fpriv->xcp_id = i; break; From 6dabce860d40703d7c27b71a120317f09293cf9c Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 22 May 2023 00:30:15 -0700 Subject: [PATCH 642/861] drm/amdgpu: Fix unsigned comparison with zero in gmc_v9_0_process_interrupt() Smatch warns: drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c:579: unsigned 'xcc_id' is never less than zero. gfx_v9_4_3_ih_to_xcc_inst() returns negative numbers as well. Fix this by changing type of xcc_id to int. Fixes: 98b2e9cad227 ("drm/amdgpu: correct the vmhub index when page fault occurs") Signed-off-by: Harshit Mogalapalli Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f70e666cecf2..1e8b2aaa48c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -557,8 +557,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, const char *hub_name; u64 addr; uint32_t cam_index = 0; - int ret; - uint32_t node_id, xcc_id = 0; + int ret, xcc_id = 0; + uint32_t node_id; node_id = entry->node_id; From 87f4c2d9205c6646b25081581e810a05cc9d2799 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 22 May 2023 13:50:28 +0200 Subject: [PATCH 643/861] drm/amd/pm: mark irq functions as 'static' Two newly added functions cause a warning because they lack a prototype: drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu13/smu_v13_0_6_ppt.c:1328:5: error: no previous prototype for 'smu_v13_0_6_set_irq_state' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu13/smu_v13_0_6_ppt.c:1368:5: error: no previous prototype for 'smu_v13_0_6_register_irq_handler' [-Werror=missing-prototypes] They are only used locally, so just mark them static. 
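As an illustrative aside (an editor's sketch with hypothetical names, not code from this patch): -Wmissing-prototypes only fires for external-linkage definitions, so marking file-local functions static is the canonical fix.

    int helper(int x) { return x + 1; }         /* external linkage, no prior prototype: warns */
    static int helper2(int x) { return x + 1; } /* internal linkage: the definition is its own declaration, no warning */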
Fixes: 676915e4108f ("drm/amd/pm: Add ih for SMU v13.0.6 thermal throttling") Signed-off-by: Arnd Bergmann Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index a712b2bf2d25..41b49cc827cd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1325,7 +1325,7 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, return 0; } -int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev, +static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned tyep, enum amdgpu_interrupt_state state) @@ -1365,7 +1365,7 @@ static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = .process = smu_v13_0_6_irq_process, }; -int smu_v13_0_6_register_irq_handler(struct smu_context *smu) +static int smu_v13_0_6_register_irq_handler(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; struct amdgpu_irq_src *irq_src = &smu->irq_source; From d522458e63136ccccea18077687ceff1d31527ca Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 22 May 2023 13:50:29 +0200 Subject: [PATCH 644/861] drm/amdkfd: mark local functions as static The file was newly added and causes some -Wmissing-prototype warnings: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c:57:5: error: no previous prototype for 'kgd_gfx_v9_4_3_hqd_sdma_load' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c:126:5: error: no previous prototype for 'kgd_gfx_v9_4_3_hqd_sdma_dump' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c:163:6: error: no previous prototype for 'kgd_gfx_v9_4_3_hqd_sdma_is_occupied' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c:181:5: error: no previous prototype for 'kgd_gfx_v9_4_3_hqd_sdma_destroy' [-Werror=missing-prototypes] Mark these all as 'static' since there are no outside callers. 
Fixes: a805889a1531 ("drm/amdkfd: Update SDMA queue management for GFX9.4.3") Signed-off-by: Arnd Bergmann Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 81dfbe39fd8e..5b4b7f8b92a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -54,7 +54,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, return retval; } -int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, +static int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { struct v9_sdma_mqd *m; @@ -123,7 +123,7 @@ int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, return 0; } -int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev, +static int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { @@ -160,7 +160,7 @@ int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev, return 0; } -bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) +static bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; @@ -178,7 +178,7 @@ bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) return false; } -int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, +static int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { struct v9_sdma_mqd *m; From 1b177b5c6846f20be013b45c36c24264049c81bf Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 22 May 2023 13:50:30 +0200 Subject: [PATCH 645/861] drm/amdgpu: mark aqua_vanjaram_reg_init.c functions as static A few newly added global functions have no prototype, which causes warnings: drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c:169:5: error: no previous prototype for 'aqua_vanjaram_select_scheds' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c:310:5: error: no previous prototype for '__aqua_vanjaram_get_xcc_per_xcp' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c:337:5: error: no previous prototype for '__aqua_vanjaram_get_xcp_ip_info' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c:593:5: error: no previous prototype for 'aqua_vanjaram_get_xcp_ip_details' [-Werror=missing-prototypes] There are no callers from other files, so just mark them as 'static'.
Fixes: cd7d8400aa04 ("drm/amdgpu: add partition schedule for GC(9, 4, 3)") Fixes: 9cb18287d8f1 ("drm/amdgpu: Add SOC partition funcs for GC v9.4.3") Signed-off-by: Arnd Bergmann Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 68d1a0fc5f5d..a595bb958215 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -166,7 +166,7 @@ static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) return aqua_vanjaram_xcp_sched_list_update(adev); } -int aqua_vanjaram_select_scheds( +static int aqua_vanjaram_select_scheds( struct amdgpu_device *adev, u32 hw_ip, u32 hw_prio, @@ -307,7 +307,7 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) return mode; } -int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) { int num_xcc, num_xcc_per_xcp = 0; @@ -334,7 +334,7 @@ int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) return num_xcc_per_xcp; } -int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, +static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, enum AMDGPU_XCP_IP_BLOCK ip_id, struct amdgpu_xcp_ip *ip) { @@ -590,7 +590,7 @@ static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr, return r; } -int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, +static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, enum AMDGPU_XCP_IP_BLOCK ip_id, struct amdgpu_xcp_ip *ip) { From 1f9bb94f128f7d13a67fbff5eca730cc2e8842a6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 22 May 2023 13:50:31 +0200 Subject: [PATCH 646/861] drm/amdgpu: use %pad format string for dma_addr_t DMA addresses can be shorter than u64, which results in a broken debug output: drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c: In function 'amdgpu_gart_table_ram_alloc': drivers/gpu/drm/amd/amdgpu/amdgpu.h:41:22: error: format '%llx' expects argument of type 'long long unsigned int', but argument 4 has type 'dma_addr_t' {aka 'unsigned int'} [-Werror=format=] drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c:146:9: note: in expansion of macro 'dev_info' 146 | dev_info(adev->dev, "%s dma_addr:%llx\n", __func__, dma_addr); Use the special %pad format string and pass the DMA address by reference. 
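For reference (editor's aside; the device, size and buffer below are hypothetical): %pad consumes a pointer to the dma_addr_t, so the same format string works whether dma_addr_t is 32-bit or 64-bit.

    dma_addr_t dma_addr;
    void *cpu_addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);

    dev_info(dev, "buffer at %pad\n", &dma_addr); /* portable: pass by reference */
    /* dev_info(dev, "buffer at %llx\n", dma_addr); would break where dma_addr_t is 32-bit */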
Fixes: c9a502e981a9 ("drm/amdgpu: Allocate GART table in RAM for AMD APU") Signed-off-by: Arnd Bergmann Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index a070adf30c88..73b8cca35bab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -143,7 +143,7 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev) return -EFAULT; } - dev_info(adev->dev, "%s dma_addr:%llx\n", __func__, dma_addr); + dev_info(adev->dev, "%s dma_addr:%pad\n", __func__, &dma_addr); /* Create SG table */ sg = kmalloc(sizeof(*sg), GFP_KERNEL); if (!sg) { From 1501fe94eedd18243b84008aecc25f4f3c4fa48d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 22 May 2023 13:50:32 +0200 Subject: [PATCH 647/861] drm/amdgpu: fix acpi build warnings Two newly introduced functions are in the global namespace but have no prototypes or callers outside of amdgpu_acpi.c, another function is static but only has a caller inside of an #ifdef: drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c:902:13: error: no previous prototype for 'amdgpu_acpi_get_node_id' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c:928:30: error: no previous prototype for 'amdgpu_acpi_get_dev' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c:860:33: error: 'amdgpu_acpi_get_numa_info' defined but not used [-Werror=unused-function] Avoid the warnings by marking all of them static and ensuring that the compiler is able to see the callsites. v2: rebase on latest code (Alex) Fixes: fa0497c34eb7 ("drm/amdgpu: Add API to get numa information of XCC") Fixes: 1cc823011a23 ("drm/amdgpu: Store additional numa node information") Signed-off-by: Arnd Bergmann Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 02a66844e73e..3fb2c3af0998 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -901,7 +901,7 @@ static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm) * * Returns ACPI STATUS OK with Node ID on success or the corresponding failure reason */ -acpi_status amdgpu_acpi_get_node_id(acpi_handle handle, +static acpi_status amdgpu_acpi_get_node_id(acpi_handle handle, struct amdgpu_numa_info **numa_info) { #ifdef CONFIG_ACPI_NUMA @@ -927,7 +927,7 @@ acpi_status amdgpu_acpi_get_node_id(acpi_handle handle, #endif } -struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf) +static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf) { struct amdgpu_acpi_dev_info *acpi_dev; From 803e4c9efc79c96796efbecab9ed53267d051256 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 22 May 2023 08:30:13 -0400 Subject: [PATCH 648/861] drm/amdgpu: remove unused variable num_xcc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc with W=1 reports drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:2138:13: error: variable ‘num_xcc’ set but not used [-Werror=unused-but-set-variable] 2138 | int num_xcc; | ^~~~~~~ This variable is not used so remove it. 
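Related editor's note: this is the sibling of the missing-prototype warnings above; -Wunused-but-set-variable (enabled by W=1) flags variables that are written but never read, as in this hypothetical reduction:

    int num_xcc;
    num_xcc = NUM_XCC(adev->gfx.xcc_mask); /* stored here... */
    /* ...never read afterwards, so both lines can simply be deleted */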
Signed-off-by: Tom Rix Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 6a1a28df606c..cb34da896737 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2135,9 +2135,6 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, static int gfx_v9_4_3_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int num_xcc; - - num_xcc = NUM_XCC(adev->gfx.xcc_mask); adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); From 665d49c27eff01c91a155a37f025b981c2f73a3b Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Sun, 21 May 2023 09:24:00 +0800 Subject: [PATCH 649/861] drm/amd/pm: reverse mclk and fclk clocks levels for SMU v13.0.4 This patch reverses the DPM clock levels output of pp_dpm_mclk and pp_dpm_fclk. On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clock levels that come from the DFPstateTable are given in reversed order by the PMFW, like the memory DPM clocks exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs, and all tools and software that talk to the driver would then have to know different ways to interpret the data depending on the ASIC. So we need to reverse them to expose the clock levels from the driver consistently. Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 0a0a7debb3ae..46a8a366f287 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, break; for (i = 0; i < count; i++) { - ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) break; From d9ed111b76e3ebe1d15b7db746d498666a396de1 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Sun, 21 May 2023 10:28:05 +0800 Subject: [PATCH 650/861] drm/amd/pm: reverse mclk clocks levels for SMU v13.0.5 This patch reverses the DPM clock levels output of pp_dpm_mclk. On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clock levels that come from the DFPstateTable are given in reversed order by the PMFW, like the memory DPM clocks exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs, and all tools and software that talk to the driver would then have to know different ways to interpret the data depending on the ASIC. So we need to reverse them to expose the clock levels from the driver consistently.
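The index-mirroring idiom shared by this series, as a standalone editor's sketch (the 'reversed' flag and print_level() are hypothetical; the real code tests clk_type):

    unsigned int i, idx;

    for (i = 0; i < count; i++) {
        /* walk a descending firmware table backwards so the
         * sysfs output always runs lowest -> highest */
        idx = reversed ? (count - i - 1) : i;
        print_level(idx);
    }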
Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 165b2470b017..7c3ac535f68a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -866,7 +866,7 @@ out: static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; @@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, goto print_clk_out; for (i = 0; i < count; i++) { - ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; From 06aade19bb2433001f4d10f1424a803b3f63734a Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Sun, 21 May 2023 10:35:59 +0800 Subject: [PATCH 651/861] drm/amd/pm: reverse mclk and fclk clocks levels for yellow carp This patch reverses the DPM clock levels output of pp_dpm_mclk and pp_dpm_fclk. On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clock levels that come from the DFPstateTable are given in reversed order by the PMFW, like the memory DPM clocks exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs, and all tools and software that talk to the driver would then have to know different ways to interpret the data depending on the ASIC. So we need to reverse them to expose the clock levels from the driver consistently. Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index ac5fcca0e47f..a92da336ecec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1000,7 +1000,7 @@ out: static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, goto print_clk_out; for (i = 0; i < count; i++) { - ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; From 63b9acdf06200bb6537a3a479741b4cb52488a89 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Sun, 21 May 2023 11:10:19 +0800 Subject: [PATCH 652/861] drm/amd/pm: reverse mclk and fclk clocks levels for vangogh This patch reverses the DPM clock levels output of pp_dpm_mclk and pp_dpm_fclk. On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clock levels that come from the DFPstateTable are given in reversed order by the PMFW,
like the memory DPM clocks exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs, and all tools and software that talk to the driver would then have to know different ways to interpret the data depending on the ASIC. So we need to reverse them to expose the clock levels from the driver consistently. Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 7433dcaa16e0..067b4e0b026c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) @@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu, case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) From acf429dcac1440169a3b28da784cbda72f3b678b Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 18 May 2023 15:08:16 -0400 Subject: [PATCH 653/861] drm/amdkfd: Align partition memory size to page size The compute partition memory size calculated from KFD_XCP_MEMORY_SIZE may not be page aligned if xcp_mgr->num_xcp_per_mem_partition is 6. Change the KFD_XCP_MEMORY_SIZE macro to return a page-aligned size, so the KFD node memory size reported in sysfs is page aligned. This avoids application VRAM allocation failures: applications may use the reported size directly, and the Thunk requires the allocation size to be page aligned.
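To make the rounding concrete (editor's aside with hypothetical numbers): splitting a 16 GiB partition six ways yields a size that is not a multiple of PAGE_SIZE, and ALIGN_DOWN trims it to the next lower page boundary:

    u64 tmp = 17179869184ULL;         /* 16 GiB partition size */
    do_div(tmp, 6);                   /* 2863311530 bytes, not 4 KiB aligned */
    tmp = ALIGN_DOWN(tmp, PAGE_SIZE); /* 2863308800 bytes with 4 KiB pages */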
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 5de92c9ab18f..66f80b9ab0c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -804,7 +804,7 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id) if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) { tmp = adev->gmc.mem_partitions[mem_id].size; do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition); - return tmp; + return ALIGN_DOWN(tmp, PAGE_SIZE); } else { return adev->gmc.real_vram_size; } From 9f173a80305d84f6b41bfb2a5482179e6e4957b1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 23 May 2023 10:34:30 +0200 Subject: [PATCH 654/861] drm/amd/display: avoid calling missing .resync_fifo_dccg_dio() The .resync_fifo_dccg_dio() callback pointer was added in an #ifdef block, but is called unconditionally: drivers/gpu/drm/amd/amdgpu/../display/dc/dce110/dce110_hw_sequencer.c:2292:31: error: 'struct hwseq_private_funcs' has no member named 'resync_fifo_dccg_dio' Add the same #ifdef around the caller as well. Fixes: 3e8d74cb128f ("drm/amd/display: Trigger DIO FIFO resync on commit streams") Signed-off-by: Arnd Bergmann Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index c6fe2c00aedb..d4cacb8df631 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2289,8 +2289,10 @@ enum dc_status dce110_apply_ctx_to_hw( if (DC_OK != status) return status; +#ifdef CONFIG_DRM_AMD_DC_FP if (hws->funcs.resync_fifo_dccg_dio) hws->funcs.resync_fifo_dccg_dio(hws, dc, context); +#endif } if (dc->fbc_compressor) From 025723e059ab454823e6aa21277976178f23d120 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Tue, 23 May 2023 07:49:37 -0400 Subject: [PATCH 655/861] drm/amd/display: remove unused variables res_create_maximus_funcs and debug_defaults_diags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc with W=1 reports drivers/gpu/drm/amd/amdgpu/../display/dc/dcn20/dcn20_resource.c:1069:43: error: ‘res_create_maximus_funcs’ defined but not used [-Werror=unused-const-variable=] 1069 | static const struct resource_create_funcs res_create_maximus_funcs = { | ^~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../display/dc/dcn20/dcn20_resource.c:727:38: error: ‘debug_defaults_diags’ defined but not used [-Werror=unused-const-variable=] 727 | static const struct dc_debug_options debug_defaults_diags = { | ^~~~~~~~~~~~~~~~~~~~ These variables are not used so remove them. 
Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Tom Rix Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn20/dcn20_resource.c | 23 ------------------- 1 file changed, 23 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 7dcae3183e07..6ef7e2634991 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -724,22 +724,6 @@ static const struct dc_debug_options debug_defaults_drv = { .underflow_assert_delay_us = 0xFFFFFFFF, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = true, - .scl_reset_length10 = true, - .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_tri_buf = true, -}; - void dcn20_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -1066,13 +1050,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn20_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn20_hwseq_create, -}; - static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); void dcn20_clock_source_destroy(struct clock_source **clk_src) From e7665d0ca7938a8f921760a780bdc55c5eda6df0 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Tue, 23 May 2023 10:18:11 +0800 Subject: [PATCH 656/861] drm/amdgpu: Remove duplicate include ./drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c: amdgpu_xcp.h is included more than once. Reported-by: Abaci Robot Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=5281 Signed-off-by: Jiapeng Chong Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index cb34da896737..c54bf0b38fe1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -23,7 +23,6 @@ #include #include "amdgpu.h" -#include "amdgpu_xcp.h" #include "amdgpu_gfx.h" #include "soc15.h" #include "soc15d.h" From aaff9c089947cdb366ffddf4d4fb9747b6469d3e Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Tue, 23 May 2023 11:17:09 +0800 Subject: [PATCH 657/861] drm/amdgpu: Modify mismatched function name No functional modification involved. drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:426: warning: expecting prototype for sdma_v4_4_2_gfx_stop(). Prototype was for sdma_v4_4_2_inst_gfx_stop() instead. drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:457: warning: expecting prototype for sdma_v4_4_2_rlc_stop(). Prototype was for sdma_v4_4_2_inst_rlc_stop() instead. drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:470: warning: expecting prototype for sdma_v4_4_2_page_stop(). Prototype was for sdma_v4_4_2_inst_page_stop() instead. drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:506: warning: expecting prototype for sdma_v4_4_2_ctx_switch_enable(). Prototype was for sdma_v4_4_2_inst_ctx_switch_enable() instead. drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:561: warning: expecting prototype for sdma_v4_4_2_enable(). Prototype was for sdma_v4_4_2_inst_enable() instead. 
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:798: warning: expecting prototype for sdma_v4_4_2_rlc_resume(). Prototype was for sdma_v4_4_2_inst_rlc_resume() instead. drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:814: warning: expecting prototype for sdma_v4_4_2_load_microcode(). Prototype was for sdma_v4_4_2_inst_load_microcode() instead. Reported-by: Abaci Robot Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=5283 Signed-off-by: Jiapeng Chong Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index bf47eb33c12e..590b08585901 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -415,7 +415,7 @@ static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 /** - * sdma_v4_4_2_gfx_stop - stop the gfx async dma engines + * sdma_v4_4_2_inst_gfx_stop - stop the gfx async dma engines * * @adev: amdgpu_device pointer * @@ -446,7 +446,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, } /** - * sdma_v4_4_2_rlc_stop - stop the compute async dma engines + * sdma_v4_4_2_inst_rlc_stop - stop the compute async dma engines * * @adev: amdgpu_device pointer * @@ -459,7 +459,7 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev, } /** - * sdma_v4_4_2_page_stop - stop the page async dma engines + * sdma_v4_4_2_inst_page_stop - stop the page async dma engines * * @adev: amdgpu_device pointer * @@ -494,7 +494,7 @@ static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev, } /** - * sdma_v4_4_2_ctx_switch_enable - stop the async dma engines context switch + * sdma_v4_4_2_inst_ctx_switch_enable - stop the async dma engines context switch * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs context switch. @@ -548,7 +548,7 @@ static void sdma_v4_4_2_inst_ctx_switch_enable(struct amdgpu_device *adev, } /** - * sdma_v4_4_2_enable - stop the async dma engines + * sdma_v4_4_2_inst_enable - stop the async dma engines * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs. @@ -786,7 +786,7 @@ static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev) } /** - * sdma_v4_4_2_rlc_resume - setup and start the async dma engines + * sdma_v4_4_2_inst_rlc_resume - setup and start the async dma engines * * @adev: amdgpu_device pointer * @@ -802,7 +802,7 @@ static int sdma_v4_4_2_inst_rlc_resume(struct amdgpu_device *adev, } /** - * sdma_v4_4_2_load_microcode - load the sDMA ME ucode + * sdma_v4_4_2_inst_load_microcode - load the sDMA ME ucode * * @adev: amdgpu_device pointer * From 1fbc69b8f543864fa0a6c4b5d95fb5f9f5d23d33 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Tue, 23 May 2023 11:32:02 +0800 Subject: [PATCH 658/861] drm/amdgpu/vcn: Modify mismatched function name No functional modification involved. drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c:374: warning: expecting prototype for vcn_v4_0_mc_resume_dpg_mode(). Prototype was for vcn_v4_0_3_mc_resume_dpg_mode() instead. drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c:631: warning: expecting prototype for vcn_v4_0_enable_clock_gating(). Prototype was for vcn_v4_0_3_enable_clock_gating() instead. 
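For context (editor's sketch with hypothetical names): kernel-doc binds a comment to the definition that follows it, so the name on the first comment line must match the function exactly, otherwise scripts/kernel-doc emits the 'expecting prototype' warning shown above.

    /**
     * foo_v1_start - start the hypothetical foo block
     *
     * The name 'foo_v1_start' above must match the definition below.
     */
    static void foo_v1_start(void) { }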
Reported-by: Abaci Robot Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=5284 Signed-off-by: Jiapeng Chong Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 339842382a1e..5d67b8b8a3d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -362,7 +362,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx) } /** - * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode + * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode * * @adev: amdgpu_device pointer * @inst_idx: instance number index @@ -620,7 +620,7 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, } /** - * vcn_v4_0_enable_clock_gating - enable VCN clock gating + * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer * @inst_idx: instance number From c3aaca43fb07ce05f3a3bd85288eb3d500469be5 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 17 May 2023 15:53:50 -0400 Subject: [PATCH 659/861] drm/amdgpu: Add a low priority scheduler for VRAM clearing Add a low priority DRM scheduler for VRAM clearing instead of using the existing high priority scheduler. Use the high priority scheduler for migrations and evictions. Signed-off-by: Mukul Joshi Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 37 ++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 5 ++- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 42c02f48c3a8..d9e331508389 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -627,7 +627,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bo->tbo.resource->mem_type == TTM_PL_VRAM) { struct dma_fence *fence; - r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence); + r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true); if (unlikely(r)) goto fail_unreserve; @@ -1354,7 +1354,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) return; - r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); + r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true); if (!WARN_ON(r)) { amdgpu_bo_fence(abo, fence, false); dma_fence_put(fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 0abad5f89421..473eeac1f03b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -383,7 +383,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { struct dma_fence *wipe_fence = NULL; - r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence); + r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence, + false); if (r) { goto error; } else if (wipe_fence) { @@ -2036,8 +2037,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) r); return; } + + r = drm_sched_entity_init(&adev->mman.delayed, + DRM_SCHED_PRIORITY_NORMAL, &sched, + 1, NULL); + if (r) { + DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", + r); + goto 
error_free_entity; + } } else { drm_sched_entity_destroy(&adev->mman.entity); + drm_sched_entity_destroy(&adev->mman.delayed); dma_fence_put(man->move); man->move = NULL; } @@ -2049,6 +2060,11 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) size = adev->gmc.visible_vram_size; man->size = size; adev->mman.buffer_funcs_enabled = enable; + + return; + +error_free_entity: + drm_sched_entity_destroy(&adev->mman.entity); } static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, @@ -2056,14 +2072,16 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, unsigned int num_dw, struct dma_resv *resv, bool vm_needs_flush, - struct amdgpu_job **job) + struct amdgpu_job **job, + bool delayed) { enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED; int r; - - r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + struct drm_sched_entity *entity = delayed ? &adev->mman.delayed : + &adev->mman.entity; + r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED, num_dw * 4, pool, job); if (r) @@ -2104,7 +2122,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, num_loops = DIV_ROUND_UP(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw, - resv, vm_needs_flush, &job); + resv, vm_needs_flush, &job, false); if (r) return r; @@ -2140,7 +2158,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, uint64_t dst_addr, uint32_t byte_count, struct dma_resv *resv, struct dma_fence **fence, - bool vm_needs_flush) + bool vm_needs_flush, bool delayed) { struct amdgpu_device *adev = ring->adev; unsigned int num_loops, num_dw; @@ -2153,7 +2171,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8); r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush, - &job); + &job, delayed); if (r) return r; @@ -2176,7 +2194,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, - struct dma_fence **f) + struct dma_fence **f, + bool delayed) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; @@ -2205,7 +2224,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, goto error; r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv, - &next, true); + &next, true, delayed); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 8ef048a0a33e..e82b1edee7a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -61,6 +61,8 @@ struct amdgpu_mman { struct mutex gtt_window_lock; /* Scheduler entity for buffer moves */ struct drm_sched_entity entity; + /* Scheduler entity for VRAM clearing */ + struct drm_sched_entity delayed; struct amdgpu_vram_mgr vram_mgr; struct amdgpu_gtt_mgr gtt_mgr; @@ -152,7 +154,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, - struct dma_fence **fence); + struct dma_fence **fence, + bool delayed); int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); From 
353491c48697df8a133bc468dc1b8ef65045254a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 22 May 2023 15:08:22 +0200 Subject: [PATCH 660/861] Revert "drm/amd/display: Block optimize on consecutive FAMS enables" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 751e17147953bc30036b8fe0eaaf780b6951404c. It depends on its parent commit, which we want to revert. Acked-by: Alex Deucher Signed-off-by: Michel Dänzer [Hamza: fix a whitespace issue in dcn30_prepare_bandwidth()] Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn20/dcn20_hwseq.c | 3 --- .../drm/amd/display/dc/dcn30/dcn30_hwseq.c | 22 +++---------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index b0a13eb8318c..b56f799f10e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2126,9 +2126,6 @@ void dcn20_optimize_bandwidth( dc_dmub_srv_p_state_delegate(dc, true, context); context->bw_ctx.bw.dcn.clk.p_state_change_support = true; - dc->clk_mgr->clks.fw_based_mclk_switching = true; - } else { - dc->clk_mgr->clks.fw_based_mclk_switching = false; } dc->clk_mgr->funcs->update_clocks( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index ad4e1023b00e..368fd5a71680 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -949,13 +949,9 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { - bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; - /* Any transition into an FPO config should disable MCLK switching first to avoid - * driver and FW P-State synchronization issues. - */ - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { dc->optimized_required = true; context->bw_ctx.bw.dcn.clk.p_state_change_support = false; } @@ -966,19 +962,7 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - /* - * enabled -> enabled: do not disable - * enabled -> disabled: disable - * disabled -> enabled: don't care - * disabled -> disabled: don't care - */ - if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) - dc_dmub_srv_p_state_delegate(dc, false, context); - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { - /* After disabling P-State, restore the original value to ensure we get the correct P-State - * on the next optimize. 
*/ - context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; - } + dc_dmub_srv_p_state_delegate(dc, false, context); } From 413521a4c9f4bbb4637b9ff3427070325890b08f Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 24 May 2023 01:17:48 +0530 Subject: [PATCH 661/861] drm/amd/amdgpu: Fix warnings in amdgpu_irq.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit checkpatch reports: WARNING: Prefer 'unsigned int' to bare use of 'unsigned' WARNING: braces {} are not necessary for any arm of this statement + if (nvec <= 0) { [...] + } else { [...] WARNING: Block comments use a trailing */ on a separate line Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 27 +++++++++++++------------ 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 3dafaf70c987..5667053c295c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -124,7 +124,7 @@ const int node_id_to_phys_map[NODEID_MAX] = { void amdgpu_irq_disable_all(struct amdgpu_device *adev) { unsigned long irqflags; - unsigned i, j, k; + unsigned int i, j, k; int r; spin_lock_irqsave(&adev->irq.lock, irqflags); @@ -283,11 +283,11 @@ int amdgpu_irq_init(struct amdgpu_device *adev) int nvec = pci_msix_vec_count(adev->pdev); unsigned int flags; - if (nvec <= 0) { + if (nvec <= 0) flags = PCI_IRQ_MSI; - } else { + else flags = PCI_IRQ_MSI | PCI_IRQ_MSIX; - } + /* we only need one vector */ nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); if (nvec > 0) { @@ -346,7 +346,7 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev) */ void amdgpu_irq_fini_sw(struct amdgpu_device *adev) { - unsigned i, j; + unsigned int i, j; for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { if (!adev->irq.client[i].sources) @@ -380,7 +380,7 @@ void amdgpu_irq_fini_sw(struct amdgpu_device *adev) * 0 on success or error code otherwise */ int amdgpu_irq_add_id(struct amdgpu_device *adev, - unsigned client_id, unsigned src_id, + unsigned int client_id, unsigned int src_id, struct amdgpu_irq_src *source) { if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) @@ -432,7 +432,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, { u32 ring_index = ih->rptr >> 2; struct amdgpu_iv_entry entry; - unsigned client_id, src_id; + unsigned int client_id, src_id; struct amdgpu_irq_src *src; bool handled = false; int r; @@ -507,7 +507,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev, * Updates interrupt state for the specific source (all ASICs). 
*/ int amdgpu_irq_update(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, unsigned type) + struct amdgpu_irq_src *src, unsigned int type) { unsigned long irqflags; enum amdgpu_interrupt_state state; @@ -516,7 +516,8 @@ int amdgpu_irq_update(struct amdgpu_device *adev, spin_lock_irqsave(&adev->irq.lock, irqflags); /* We need to determine after taking the lock, otherwise - we might disable just enabled interrupts again */ + * we might disable just enabled interrupts again + */ if (amdgpu_irq_enabled(adev, src, type)) state = AMDGPU_IRQ_STATE_ENABLE; else @@ -570,7 +571,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) * 0 on success or error code otherwise */ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type) + unsigned int type) { if (!adev->irq.installed) return -ENOENT; @@ -600,7 +601,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, * 0 on success or error code otherwise */ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type) + unsigned int type) { if (!adev->irq.installed) return -ENOENT; @@ -634,7 +635,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, * invalid parameters */ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type) + unsigned int type) { if (!adev->irq.installed) return false; @@ -747,7 +748,7 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev) * Returns: * Linux IRQ */ -unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id) +unsigned int amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned int src_id) { adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id); From 360930985ec9f394c82ba0b235403b4a366d1560 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 22 May 2023 15:08:23 +0200 Subject: [PATCH 662/861] Revert "drm/amd/display: Do not set drr on pipe commit" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit e101bf95ea87ccc03ac2f48dfc0757c6364ff3c7. Caused a regression: Samsung Odyssey Neo G9, running at 5120x1440@240/VRR, connected to Navi 21 via DisplayPort, blanks and the GPU hangs while starting the Steam game Assetto Corsa Competizione (via Proton 7.0). Example dmesg excerpt: amdgpu 0000:0c:00.0: [drm] ERROR [CRTC:82:crtc-0] flip_done timed out NMI watchdog: Watchdog detected hard LOCKUP on cpu 6 [...] 
RIP: 0010:amdgpu_device_rreg.part.0+0x2f/0xf0 [amdgpu] Code: 41 54 44 8d 24 b5 00 00 00 00 55 89 f5 53 48 89 fb 4c 3b a7 60 0b 00 00 73 6a 83 e2 02 74 29 4c 03 a3 68 0b 00 00 45 8b 24 24 <48> 8b 43 08 0f b7 70 3e 66 90 44 89 e0 5b 5d 41 5c 31 d2 31 c9 31 RSP: 0000:ffffb39a119dfb88 EFLAGS: 00000086 RAX: ffffffffc0eb96a0 RBX: ffff9e7963dc0000 RCX: 0000000000007fff RDX: 0000000000000000 RSI: 0000000000004ff6 RDI: ffff9e7963dc0000 RBP: 0000000000004ff6 R08: ffffb39a119dfc40 R09: 0000000000000010 R10: ffffb39a119dfc40 R11: ffffb39a119dfc44 R12: 00000000000e05ae R13: 0000000000000000 R14: ffff9e7963dc0010 R15: 0000000000000000 FS: 000000001012f6c0(0000) GS:ffff9e805eb80000(0000) knlGS:000000007fd40000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000461ca000 CR3: 00000002a8a20000 CR4: 0000000000350ee0 Call Trace: dm_read_reg_func+0x37/0xc0 [amdgpu] generic_reg_get2+0x22/0x60 [amdgpu] optc1_get_crtc_scanoutpos+0x6a/0xc0 [amdgpu] dc_stream_get_scanoutpos+0x74/0x90 [amdgpu] dm_crtc_get_scanoutpos+0x82/0xf0 [amdgpu] amdgpu_display_get_crtc_scanoutpos+0x91/0x190 [amdgpu] ? dm_read_reg_func+0x37/0xc0 [amdgpu] amdgpu_get_vblank_counter_kms+0xb4/0x1a0 [amdgpu] dm_pflip_high_irq+0x213/0x2f0 [amdgpu] amdgpu_dm_irq_handler+0x8a/0x200 [amdgpu] amdgpu_irq_dispatch+0xd4/0x220 [amdgpu] amdgpu_ih_process+0x7f/0x110 [amdgpu] amdgpu_irq_handler+0x1f/0x70 [amdgpu] __handle_irq_event_percpu+0x46/0x1b0 handle_irq_event+0x34/0x80 handle_edge_irq+0x9f/0x240 __common_interrupt+0x66/0x110 common_interrupt+0x5c/0xd0 asm_common_interrupt+0x22/0x40 Reviewed-by: Aurabindo Pillai Acked-by: Alex Deucher Signed-off-by: Michel Dänzer Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 6 ------ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 7 ------- 2 files changed, 13 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index b56f799f10e1..eaf9e9ccad2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2122,12 +2122,6 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - dc_dmub_srv_p_state_delegate(dc, - true, context); - context->bw_ctx.bw.dcn.clk.p_state_change_support = true; - } - dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 368fd5a71680..b9753867d97b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -951,18 +951,11 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context) { - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - dc->optimized_required = true; - context->bw_ctx.bw.dcn.clk.p_state_change_support = false; - } - if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, 
context); - - dc_dmub_srv_p_state_delegate(dc, false, context); } From 55a6dc60b47c817c644af2b505d46815d8b9219e Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 23 May 2023 11:59:53 -0400 Subject: [PATCH 663/861] drm/amdkfd: Set event interrupt class for GFX 9.4.3 Fix the warning during driver load because the event interrupt class is not set for GFX9.4.3. Signed-off-by: Mukul Joshi Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index e92b93b2c14c..862a50f7b490 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -139,6 +139,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 0): /* VEGA20 */ case IP_VERSION(9, 4, 1): /* ARCTURUS */ case IP_VERSION(9, 4, 2): /* ALDEBARAN */ + case IP_VERSION(9, 4, 3): /* GC 9.4.3 */ case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ From d48a4f2c2809b882b58b428577707fe1b6f52673 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Mon, 22 May 2023 23:17:28 +0800 Subject: [PATCH 664/861] drm/amd/pm: reverse mclk and fclk clocks levels for renoir This patch reverses the DPM clock levels output of pp_dpm_mclk and pp_dpm_fclk for renoir. On dGPUs and older APUs we expose the levels from lowest to highest clocks, but on some APUs the clock levels are reported by the PMFW in reversed order, like the memory DPM clocks exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs, and all tools and software that talk to the driver then have to know different ways to interpret the data depending on the ASIC. So we need to reverse them so that the driver exposes the clock levels consistently. Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5cdc07165480..8a8ba25c9ad7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, case SMU_VCLK: case SMU_DCLK: for (i = 0; i < count; i++) { - ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ?
(count - i - 1) : i; + ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) From 194224a54c8bbc896b1fdb4a10ca5789ea4b9e7d Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 23 May 2023 13:26:39 +0530 Subject: [PATCH 665/861] drm/amdgpu: Fix warnings Fix warnings reported by kernel test bot/smatch smatch warnings: drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c:65 amdgpu_xcp_run_transition() error: buffer overflow 'xcp_mgr->xcp' 8 <= 8 Reported-by: kernel test robot Reported-by: Dan Carpenter Link: https://lore.kernel.org/r/202305231453.I0bXngYn-lkp@intel.com/ Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index e9586a0dc335..fcdc0862d258 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -65,7 +65,7 @@ static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, struct amdgpu_xcp *xcp; int i, ret; - if (xcp_id > MAX_XCP || !xcp_mgr->xcp[xcp_id].valid) + if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid) return -EINVAL; xcp = &xcp_mgr->xcp[xcp_id]; From a569552f76889c8162b73add69afa161ff660ae4 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Wed, 24 May 2023 15:03:02 +0800 Subject: [PATCH 666/861] drm/amd/pm: resolve reboot exception for si oland During a reboot test on an arm64 platform, the boot may fail. The error messages are as follows: [ 1.706570][ 3] [ T273] [drm:si_thermal_enable_alert [amdgpu]] *ERROR* Could not enable thermal interrupts. [ 1.716547][ 3] [ T273] [drm:amdgpu_device_ip_late_init [amdgpu]] *ERROR* late_init of IP block failed -22 [ 1.727064][ 3] [ T273] amdgpu 0000:02:00.0: amdgpu_device_ip_late_init failed [ 1.734367][ 3] [ T273] amdgpu 0000:02:00.0: Fatal error during GPU init v2: squash in build warning fix (Alex) Signed-off-by: Zhenneng Li Reviewed-by: Guchun Chen Signed-off-by: Guchun Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 29 ---------------------- 1 file changed, 29 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index d6d9e3b1b2c0..02e69ccff3ba 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) return 0; } -static int si_set_temperature_range(struct amdgpu_device *adev) -{ - int ret; - - ret = si_thermal_enable_alert(adev, false); - if (ret) - return ret; - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = si_thermal_enable_alert(adev, true); - if (ret) - return ret; - - return ret; -} - static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, static int si_dpm_late_init(void *handle) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - ret = si_set_temperature_range(adev); - if (ret) - return ret; -#if 0 //TODO ?
- si_dpm_powergate_uvd(adev, true); -#endif return 0; } From 61c31b8b6c5e386a9c2ddc4e2cf9d8ae46c8a1f9 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 12 May 2023 16:04:56 +0800 Subject: [PATCH 667/861] drm/amdgpu/sdma: set sched.ready status after ring/IB test in sdma sched.ready has nothing to do with ring initialization; it needs to be set to true after the ring/IB test in amdgpu_ring_test_helper to indicate that the ring is ready for submission. Signed-off-by: Guchun Chen Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 2 -- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 2 -- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 2 -- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4 ---- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 4 ---- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 2 -- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 6 +----- drivers/gpu/drm/amd/amdgpu/si_dma.c | 2 -- 9 files changed, 3 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 67d16236b216..52598fbc9b39 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -489,8 +489,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) #endif /* enable DMA IBs */ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); - - ring->sched.ready = true; } cik_sdma_enable(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index fd2a7b66ac56..51afc92994a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -466,8 +466,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) #endif /* enable DMA IBs */ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); - - ring->sched.ready = true; } sdma_v2_4_enable(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index e572389089d2..344202870aeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -734,8 +734,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) #endif /* enable DMA IBs */ WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); - - ring->sched.ready = true; } /* unhalt the MEs */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 70b0d1fd9868..1f83eebfc8a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1114,8 +1114,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) #endif /* enable DMA IBs */ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); - - ring->sched.ready = true; } /** @@ -1202,8 +1200,6 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i) #endif /* enable DMA IBs */ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); - - ring->sched.ready = true; } static void diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 590b08585901..ff41fb577cdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -685,8 +685,6 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i) #endif /* enable DMA IBs */ WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); - - ring->sched.ready = true; } /** @@ -776,8 +774,6 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i) #endif /* enable DMA IBs */
WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl); - - ring->sched.ready = true; } static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index a0077cf41295..5c4d4df9cf94 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -819,8 +819,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) /* enable DMA IBs */ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - ring->sched.ready = true; - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ sdma_v5_0_ctx_switch_enable(adev, true); sdma_v5_0_enable(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index efa2c84ee78e..6aae62b68f32 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -617,8 +617,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) /* enable DMA IBs */ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - ring->sched.ready = true; - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ sdma_v5_2_ctx_switch_enable(adev, true); sdma_v5_2_enable(adev, true); @@ -630,6 +628,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) return r; } + ring->sched.ready = true; + if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 79d09792d2ce..1c90b5c661fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -585,16 +585,12 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) /* enable DMA IBs */ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); - ring->sched.ready = true; - if (amdgpu_sriov_vf(adev)) sdma_v6_0_enable(adev, true); r = amdgpu_ring_test_helper(ring); - if (r) { - ring->sched.ready = false; + if (r) return r; - } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index abca8b529721..42c4547f32ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -174,8 +174,6 @@ static int si_dma_start(struct amdgpu_device *adev) WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2); WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); - ring->sched.ready = true; - r = amdgpu_ring_test_helper(ring); if (r) return r; From 232f2431899cbe6c00c1350e35cfba91ea0c1c0b Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 12 May 2023 16:10:40 +0800 Subject: [PATCH 668/861] drm/amdgpu/gfx: set sched.ready status after ring/IB test in gfx sched.ready has nothing to do with ring initialization; it needs to be set to true after the ring/IB test in amdgpu_ring_test_helper to indicate that the ring is ready for submission.
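[Editor's note: the two sched.ready patches above rely on amdgpu_ring_test_helper() owning that state. For reference, a minimal sketch of what the helper does around this point in the series (the exact log wording in the real function may differ):

	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;
		int r;

		r = amdgpu_ring_test_ring(ring);
		if (r)
			DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
				      ring->name, r);

		/* the ring accepts submissions only if the test passed */
		ring->sched.ready = !r;

		return r;
	}

This is why the per-IP "ring->sched.ready = true" assignments during ring bring-up are redundant and can be dropped.]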
Signed-off-by: Guchun Chen Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 24 ++++-------------------- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 25 ++++--------------------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2 -- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 -- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 1 - 5 files changed, 8 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 0d15002eac69..f7ad883a70fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6073,7 +6073,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) u32 tmp; u32 rb_bufsz; u64 rb_addr, rptr_addr, wptr_gpu_addr; - u32 i; /* Set the write pointer delay */ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0); @@ -6168,11 +6167,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) /* start the ring */ gfx_v10_0_cp_gfx_start(adev); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - ring->sched.ready = true; - } - return 0; } @@ -6470,7 +6464,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) - goto done; + return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { @@ -6480,23 +6474,14 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) } amdgpu_bo_unreserve(ring->mqd_obj); if (r) - goto done; + return r; } r = amdgpu_gfx_enable_kgq(adev, 0); if (r) - goto done; + return r; - r = gfx_v10_0_cp_gfx_start(adev); - if (r) - goto done; - - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - ring->sched.ready = true; - } -done: - return r; + return gfx_v10_0_cp_gfx_start(adev); } static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m, @@ -6812,7 +6797,6 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); - ring->sched.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 4b7224de879e..da21bf868080 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -3228,7 +3228,6 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) u32 tmp; u32 rb_bufsz; u64 rb_addr, rptr_addr, wptr_gpu_addr; - u32 i; /* Set the write pointer delay */ WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); @@ -3320,11 +3319,6 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) /* start the ring */ gfx_v11_0_cp_gfx_start(adev); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - ring->sched.ready = true; - } - return 0; } @@ -3370,8 +3364,6 @@ static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); } - adev->gfx.kiq[0].ring.sched.ready = enable; - udelay(50); } @@ -3711,7 +3703,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) - goto done; + return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { @@ -3721,23 +3713,14 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) } amdgpu_bo_unreserve(ring->mqd_obj); if (r) - goto done; + return r; } r = amdgpu_gfx_enable_kgq(adev, 0); if (r) 
- goto done; + return r; - r = gfx_v11_0_cp_gfx_start(adev); - if (r) - goto done; - - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - ring->sched.ready = true; - } -done: - return r; + return gfx_v11_0_cp_gfx_start(adev); } static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6d0589dc1d6c..2f1ef75e126c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4283,7 +4283,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) /* start the ring */ amdgpu_ring_clear_ring(ring); gfx_v8_0_cp_gfx_start(adev); - ring->sched.ready = true; return 0; } @@ -4693,7 +4692,6 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); - ring->sched.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 8bf95a6b0767..fe090eafa366 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3144,7 +3144,6 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) /* start the ring */ gfx_v9_0_cp_gfx_start(adev); - ring->sched.ready = true; return 0; } @@ -3623,7 +3622,6 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); - ring->sched.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c54bf0b38fe1..ed41a7862d9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1845,7 +1845,6 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); - ring->sched.ready = true; return 0; } From 93ab59ac6d8311244a76ddb31e7ced4cb1e8f22c Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 12 May 2023 16:14:25 +0800 Subject: [PATCH 669/861] drm/amdgpu: switch to unified amdgpu_ring_test_helper This will simplify code. 
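[Editor's note: the conversion pattern applied by this patch, as a hedged before/after sketch derived from the diffs below (the surrounding code varies per engine):

	/* before: open-coded ring test plus manual sched.ready bookkeeping */
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		DRM_ERROR("ring %s test failed (%d)\n", ring->name, r);
		ring->sched.ready = false;
		return r;
	}
	ring->sched.ready = true;

	/* after: one helper that runs the test, logs a failure and
	 * derives sched.ready from the result
	 */
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;
]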
Signed-off-by: Guchun Chen Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 9 ++------- drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 8 +------- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 7 +------ drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 8 ++------ 4 files changed, 6 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index f0f00466b59f..49bb6c03d606 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1305,14 +1305,9 @@ static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings) if (!ring) continue; - r = amdgpu_ring_test_ring(ring); - if (r) { - DRM_DEV_ERROR(ring->adev->dev, - "ring %s test failed (%d)\n", - ring->name, r); + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } else - DRM_INFO("ring %s test pass\n", ring->name); r = amdgpu_ring_test_ib(ring, 1000 * 10); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 88262f10ef7c..36a123e6c8ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -815,13 +815,7 @@ static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev) kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring); - r = amdgpu_ring_test_ring(kiq_ring); - if (r) { - DRM_ERROR("kfq enable failed\n"); - kiq_ring->sched.ready = false; - } - - return r; + return amdgpu_ring_test_helper(kiq_ring); } static int mes_v10_1_queue_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 9a48328c6572..90b4a74ccf01 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -882,12 +882,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev) kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring); - r = amdgpu_ring_test_ring(kiq_ring); - if (r) { - DRM_ERROR("kfq enable failed\n"); - kiq_ring->sched.ready = false; - } - return r; + return amdgpu_ring_test_helper(kiq_ring); } static int mes_v11_0_queue_init(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 6aae62b68f32..a7b230e5a26d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -622,13 +622,9 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) sdma_v5_2_enable(adev, true); } - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } - - ring->sched.ready = true; if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); From 3525844d483bfb2236c1dd00f7a490297721ef78 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 28 Jul 2022 11:42:44 +0530 Subject: [PATCH 670/861] drm/amdgpu: Use single copy per SDMA instance type (v2) v1: Only single copy per instance type is required for PSP. All instance types use the same firmware copy. (Lijo) v2: Apply the change into amdgpu_sdma_init_microcode() due to rebase. 
(Morris) Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Shiwu Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 9568adaad5cb..78ec3420ef85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -252,6 +252,13 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, if (!duplicate && (instance != i)) continue; else { + /* Use a single copy per SDMA firmware type. PSP uses the same instance for all + * groups of SDMAs */ + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2) && + adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && + adev->sdma.num_inst_per_aid == i) { + break; + } info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; info->fw = adev->sdma.instance[i].fw; From bea35f7d4219999b5e4a9c8cdf6a17683b2ed1bd Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:40 +0800 Subject: [PATCH 671/861] drm/amd/display: remove unused definition Eliminate the following warning: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_resource.c:889:43: warning: unused variable 'res_create_maximus_funcs' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index a0625209c86d..26ddf73fd5b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -886,13 +886,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn10_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn10_hwseq_create, -}; - static void dcn10_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); From 164f0791c5d10a2f0e947f6872a7c14ccd860085 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:41 +0800 Subject: [PATCH 672/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn21/dcn21_resource.c:1222:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn21/dcn21_resource.c:658:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn21/dcn21_resource.c | 23 ------------------- 1 file changed, 23 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 76268a7b7934..da6d42de0554 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -655,22 +655,6 @@ static const struct dc_debug_options debug_defaults_drv = { .use_max_lb = 
true, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = true, - .disable_48mhz_pwrdwn = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1219,13 +1203,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn21_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn21_hwseq_create, -}; - static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, From 95c2f89895930538879e1c3f1467a8095e03d1a5 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:42 +0800 Subject: [PATCH 673/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn30/dcn30_resource.c:1079:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn30/dcn30_resource.c:731:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn30/dcn30_resource.c | 25 ------------------- 1 file changed, 25 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index cd94b5f5fdb4..f4ee4b3df596 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -728,24 +728,6 @@ static const struct dc_debug_options debug_defaults_drv = { .exit_idle_opt_for_cursor_updates = true }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, //No dmcu on DCN30 - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1076,13 +1058,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn30_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn30_hwseq_create, -}; - static void dcn30_resource_destruct(struct dcn30_resource_pool *pool) { unsigned int i; From 200c7c8132318378feffc1e4b0189482f4c7f1b6 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 24 May 2023 12:51:23 -0500 Subject: [PATCH 674/861] drm/amd: Drop debugfs access to the DPCD There is already access to the DPCD from userspace through `CONFIG_DRM_DP_AUX_CHARDEV`, 
so it's unnecessary to reinvent the wheel with a set of extra debugfs nodes specific to amdgpu. The character device interface behaves more like you would expect in that you can seek/read/write all from the same file. Reviewed-by: Hamza Mahfooz Reviewed-by: Harry Wentland Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 4 - .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 107 ------------------ 2 files changed, 111 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 2e2413fd73a4..4561f55afa99 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -661,10 +661,6 @@ struct amdgpu_dm_connector { struct mutex hpd_lock; bool fake_enable; -#ifdef CONFIG_DEBUG_FS - uint32_t debugfs_dpcd_address; - uint32_t debugfs_dpcd_size; -#endif bool force_yuv420_output; struct dsc_preferred_settings dsc_settings; union dp_downstream_port_present mst_downstream_port_present; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 827fcb4fb3b3..82234397dd44 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1039,88 +1039,6 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b return write_size; } -static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - int r; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - - if (size < sizeof(connector->debugfs_dpcd_address)) - return -EINVAL; - - r = copy_from_user(&connector->debugfs_dpcd_address, - buf, sizeof(connector->debugfs_dpcd_address)); - - return size - r; -} - -static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - int r; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - - if (size < sizeof(connector->debugfs_dpcd_size)) - return -EINVAL; - - r = copy_from_user(&connector->debugfs_dpcd_size, - buf, sizeof(connector->debugfs_dpcd_size)); - - if (connector->debugfs_dpcd_size > 256) - connector->debugfs_dpcd_size = 0; - - return size - r; -} - -static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - int r; - char *data; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - struct dc_link *link = connector->dc_link; - uint32_t write_size = connector->debugfs_dpcd_size; - - if (!write_size || size < write_size) - return -EINVAL; - - data = kzalloc(write_size, GFP_KERNEL); - if (!data) - return 0; - - r = copy_from_user(data, buf, write_size); - - dm_helpers_dp_write_dpcd(link->ctx, link, - connector->debugfs_dpcd_address, data, write_size - r); - kfree(data); - return write_size - r; -} - -static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - int r; - char *data; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - struct dc_link *link = connector->dc_link; - uint32_t read_size = connector->debugfs_dpcd_size; - - if (!read_size || size < read_size) - return 0; - - data = kzalloc(read_size, GFP_KERNEL); - if (!data) - return 0; - - dm_helpers_dp_read_dpcd(link->ctx, link, - connector->debugfs_dpcd_address, data, read_size); - - r = copy_to_user(buf, data, read_size); - - kfree(data); - return 
read_size - r; -} - /* function: Read link's DSC & FEC capabilities * * @@ -2682,25 +2600,6 @@ static const struct file_operations sdp_message_fops = { .llseek = default_llseek }; -static const struct file_operations dp_dpcd_address_debugfs_fops = { - .owner = THIS_MODULE, - .write = dp_dpcd_address_write, - .llseek = default_llseek -}; - -static const struct file_operations dp_dpcd_size_debugfs_fops = { - .owner = THIS_MODULE, - .write = dp_dpcd_size_write, - .llseek = default_llseek -}; - -static const struct file_operations dp_dpcd_data_debugfs_fops = { - .owner = THIS_MODULE, - .read = dp_dpcd_data_read, - .write = dp_dpcd_data_write, - .llseek = default_llseek -}; - static const struct file_operations dp_max_bpc_debugfs_fops = { .owner = THIS_MODULE, .read = dp_max_bpc_read, @@ -2724,9 +2623,6 @@ static const struct { {"test_pattern", &dp_phy_test_pattern_fops}, {"hdcp_sink_capability", &hdcp_sink_capability_fops}, {"sdp_message", &sdp_message_fops}, - {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops}, - {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops}, - {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}, {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops}, {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops}, {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops}, @@ -3025,9 +2921,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) connector_debugfs_entries[i].fops); } - connector->debugfs_dpcd_address = 0; - connector->debugfs_dpcd_size = 0; - if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) { for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) { debugfs_create_file(hdmi_debugfs_entries[i].name, From 4ba439b0b38456c61505568a7fa4fa364850a236 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:43 +0800 Subject: [PATCH 675/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn31/dcn31_resource.c:1356:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn31/dcn31_resource.c:894:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn31/dcn31_resource.c | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 9c637c895f91..f2d589c505a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -891,24 +891,6 @@ static const struct dc_debug_options debug_defaults_drv = { .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1353,15 +1335,6 @@ static const struct resource_create_funcs 
res_create_funcs = { .create_hwseq = dcn31_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) { unsigned int i; From 6c882a573bc1d6130274ef74d1697dd769f6a9e4 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Wed, 24 May 2023 09:44:02 -0700 Subject: [PATCH 676/861] drm/amdgpu: Fix return types of certain NBIOv7.9 callbacks When building with clang's -Wincompatible-function-pointer-types-strict, which ensures that function pointer signatures match exactly to avoid tripping clang's Control Flow Integrity (kCFI) checks at run time and will eventually be turned on for the kernel, the following instances appear in the NBIOv7.9 code: drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c:465:32: error: incompatible function pointer types initializing 'int (*)(struct amdgpu_device *)' with an expression of type 'enum amdgpu_gfx_partition (struct amdgpu_device *)' [-Werror,-Wincompatible-function-pointer-types-strict] .get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode, ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c:467:31: error: incompatible function pointer types initializing 'u32 (*)(struct amdgpu_device *, u32 *)' (aka 'unsigned int (*)(struct amdgpu_device *, unsigned int *)') with an expression of type 'enum amdgpu_memory_partition (struct amdgpu_device *, u32 *)' (aka 'enum amdgpu_memory_partition (struct amdgpu_device *, unsigned int *)') [-Werror,-Wincompatible-function-pointer-types-strict] .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 errors generated. Change the return types of these callbacks to match the prototypes to clear up the warning and avoid tripping kCFI at run time. Both functions return a value from ffs(), which is an integer that can fit into either int or unsigned int. Fixes: 98a54e88e87f ("drm/amdgpu: add sysfs node for compute partition mode") Fixes: ea2d2f8ececd ("drm/amdgpu: detect current GPU memory partition mode") Signed-off-by: Nathan Chancellor Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index e082f6343d20..d19325476752 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -382,7 +382,7 @@ static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev, DOORBELL_INTERRUPT_DISABLE, enable ? 
0 : 1); } -static enum amdgpu_gfx_partition nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev) +static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev) { u32 tmp, px; @@ -408,8 +408,8 @@ static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev, WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS, tmp); } -static enum amdgpu_memory_partition -nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev, u32 *supp_modes) +static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev, + u32 *supp_modes) { u32 tmp; From a09e2065101a343ac3a709aa6236cdac874627eb Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 24 May 2023 20:37:26 +0530 Subject: [PATCH 677/861] drm/amdgpu: Fix defined but not used gfx9_cs_data in gfx_v9_4_3.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc with W=1 In file included from drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c:33: drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h:939:36: warning: ‘gfx9_cs_data’ defined but not used [-Wunused-const-variable=] 939 | static const struct cs_section_def gfx9_cs_data[] = { | gfx9_cs_data is not used in gfx_v9_4_3.c, hence remove its include in gfx_v9_4_3.c Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index ed41a7862d9f..bdaea50cafe7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -29,7 +29,6 @@ #include "soc15_common.h" #include "vega10_enum.h" -#include "clearstate_gfx9.h" #include "v9_structs.h" #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h" From 8cce16826f5e154a3463b7eafa0f6beebeb48e49 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 24 May 2023 00:26:28 +0530 Subject: [PATCH 678/861] drm/amdgpu: Fix unused variable in amdgpu_gfx.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c: In function ‘amdgpu_gfx_disable_kcq’: drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c:497:6: warning: variable ‘j’ set but not used [-Wunused-but-set-variable] 497 | int j; | ^ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c: In function ‘amdgpu_gfx_disable_kgq’: drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c:528:6: warning: variable ‘j’ set but not used [-Wunused-but-set-variable] 528 | int j; | ^ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c: In function ‘amdgpu_gfx_enable_kgq’: drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c:630:12: warning: variable ‘j’ set but not used [-Wunused-but-set-variable] 630 | int r, i, j; | Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 2b4bf6c11ae4..a33d4bc34cee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -509,7 +509,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) for (i = 0; i < adev->gfx.num_compute_rings; i++) { j = i + xcc_id * adev->gfx.num_compute_rings; kiq->pmf->kiq_unmap_queues(kiq_ring, - &adev->gfx.compute_ring[i], + &adev->gfx.compute_ring[j], RESET_QUEUES, 0, 0); } @@ 
-541,7 +541,7 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) for (i = 0; i < adev->gfx.num_gfx_rings; i++) { j = i + xcc_id * adev->gfx.num_gfx_rings; kiq->pmf->kiq_unmap_queues(kiq_ring, - &adev->gfx.gfx_ring[i], + &adev->gfx.gfx_ring[j], PREEMPT_QUEUES, 0, 0); } } @@ -648,7 +648,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) for (i = 0; i < adev->gfx.num_gfx_rings; i++) { j = i + xcc_id * adev->gfx.num_gfx_rings; kiq->pmf->kiq_map_queues(kiq_ring, - &adev->gfx.gfx_ring[i]); + &adev->gfx.gfx_ring[j]); } } From 423502d4f622ad90414bd38d834763277fbab26c Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:44 +0800 Subject: [PATCH 679/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource.c:1360:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource.c:737:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn32/dcn32_resource.c | 29 ------------------- 1 file changed, 29 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 33abc8c9d4be..2e6b39fe2613 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -734,26 +734,6 @@ static const struct dc_debug_options debug_defaults_drv = { .fpo_vactive_max_blank_us = 1000, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_dsc_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true, - .force_disable_subvp = true -}; - static struct dce_aux *dcn32_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1357,15 +1337,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn32_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn32_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn32_hpo_dp_link_encoder_create, - .create_hwseq = dcn32_hwseq_create, -}; - static void dcn32_resource_destruct(struct dcn32_resource_pool *pool) { unsigned int i; From 24bc366a4309f407ea77110ba15e3581005def6b Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:45 +0800 Subject: [PATCH 680/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn301/dcn301_resource.c:1050:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn301/dcn301_resource.c:705:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 
25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../amd/display/dc/dcn301/dcn301_resource.c | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 1cf84a086fec..3485fbb1093e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -702,23 +702,6 @@ static const struct dc_debug_options debug_defaults_drv = { .exit_idle_opt_for_cursor_updates = true }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = false, - .disable_hubp_power_gate = false, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = true, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .use_max_lb = false, -}; - static void dcn301_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -1047,13 +1030,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn301_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn301_hwseq_create, -}; - static void dcn301_destruct(struct dcn301_resource_pool *pool) { unsigned int i; From b7941e2fef13baabd3eade31601e70adf729e887 Mon Sep 17 00:00:00 2001 From: Jiadong Zhu Date: Wed, 24 May 2023 11:42:19 +0800 Subject: [PATCH 681/861] drm/amdgpu: Reset CP_VMID_PREEMPT after trailing fence signaled When MEC executes unmap_queue for mid command buffer preemption, it kicks the write pointer of the gfx ring, sets CP_VMID_PREEMPT to trigger the preemption, and waits for CP_VMID_PREEMPT to become zero after the preemption is done. There is a race condition in which PFP may execute the resetting command before MEC sets CP_VMID_PREEMPT. As a result, a hang happens because CP_VMID_PREEMPT stays 0xffff. To avoid this, send the CP_VMID_PREEMPT resetting command after the trailing fence is signaled and update the gfx write pointer explicitly.
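[Editor's note: to make the new ordering concrete, the resulting sequence in gfx_v9_0_ring_preempt_ib() looks roughly like the sketch below (arguments abbreviated with "..."; see the diff that follows for the exact code):

	/* emit the trailing fence, then assert preemption via the KIQ */
	gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				 ring->trail_seq,
				 AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, ...);

	/* ... poll until the trailing fence signals (or warn on timeout) ... */

	/* only now clear CP_VMID_PREEMPT: MEC has already set it, so PFP
	 * can no longer reset it before MEC's write lands
	 */
	amdgpu_ring_emit_wreg(ring,
			      SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT), 0x0);
	amdgpu_ring_commit(ring);
]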
Signed-off-by: Jiadong Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index fe090eafa366..45fa02063491 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -5400,10 +5400,6 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) amdgpu_ring_alloc(ring, 13); gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT); - /*reset the CP_VMID_PREEMPT after trailing fence*/ - amdgpu_ring_emit_wreg(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT), - 0x0); /* assert IB preemption, emit the trailing fence */ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, @@ -5426,6 +5422,10 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) DRM_WARN("ring %d timeout to preempt ib\n", ring->idx); } + /*reset the CP_VMID_PREEMPT after trailing fence*/ + amdgpu_ring_emit_wreg(ring, + SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT), + 0x0); amdgpu_ring_commit(ring); /* deassert preemption condition */ From cfdce594171cea19ba033e8d7ff57a767c0ccd63 Mon Sep 17 00:00:00 2001 From: Jiadong Zhu Date: Wed, 24 May 2023 16:51:32 +0800 Subject: [PATCH 682/861] drm/amdgpu: Program gds backup address as zero if no gds allocated It is a firmware requirement that gds_backup_addrlo and gds_backup_addrhi of the DE meta both be set to zero if no GDS partition is allocated for the frame. Signed-off-by: Jiadong Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 45fa02063491..0189e50bd89f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -765,7 +765,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev); static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); -static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); +static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); @@ -5158,7 +5158,8 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, gfx_v9_0_ring_emit_de_meta(ring, (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ?
- true : false); + true : false, + job->gds_size > 0 && job->gds_base != 0); } amdgpu_ring_write(ring, header); @@ -5433,7 +5434,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } -static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) +static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds) { struct amdgpu_device *adev = ring->adev; struct v9_de_ib_state de_payload = {0}; @@ -5464,8 +5465,10 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) PAGE_SIZE); } - de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); - de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); + if (usegds) { + de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); + de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); + } cnt = (sizeof(de_payload) >> 2) + 4 - 2; amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); From 9535a86a4072babc37dc6bdadae52bdbb88166f5 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Wed, 17 May 2023 14:15:05 +0800 Subject: [PATCH 683/861] drm/amdgpu: bypass bios dependent operations Since bios reading does not work currently so just bypass all operations related to bios v2: hardcode the vram info for APP_APU case (hawking) v3: correct the vram_width with channel number * channel size (lijo) Signed-off-by: Shiwu Zhang Reviewed-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 66 +++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 ++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 67 +++++++++++++--------- 3 files changed, 87 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ff9ca0dbeb5a..8f50ca2bee97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1389,6 +1389,15 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) return 0; } +static bool amdgpu_device_read_bios(struct amdgpu_device *adev) +{ + if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) { + return false; + } + + return true; +} + /* * GPU helpers function. 
*/ @@ -1408,6 +1417,9 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return false; + if (!amdgpu_device_read_bios(adev)) + return false; + if (amdgpu_passthrough(adev)) { /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot * some old smc fw still need driver do vPost otherwise gpu hang, while @@ -2318,14 +2330,16 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return r; /* Read BIOS */ - if (!amdgpu_get_bios(adev)) - return -EINVAL; + if (amdgpu_device_read_bios(adev)) { + if (!amdgpu_get_bios(adev)) + return -EINVAL; - r = amdgpu_atombios_init(adev); - if (r) { - dev_err(adev->dev, "amdgpu_atombios_init failed\n"); - amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); - return r; + r = amdgpu_atombios_init(adev); + if (r) { + dev_err(adev->dev, "amdgpu_atombios_init failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); + return r; + } } /*get pf2vf msg info at it's earliest time*/ @@ -3945,25 +3959,27 @@ int amdgpu_device_init(struct amdgpu_device *adev, } } - if (adev->is_atom_fw) { - /* Initialize clocks */ - r = amdgpu_atomfirmware_get_clock_info(adev); - if (r) { - dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); - amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); - goto failed; + if (adev->bios) { + if (adev->is_atom_fw) { + /* Initialize clocks */ + r = amdgpu_atomfirmware_get_clock_info(adev); + if (r) { + dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; + } + } else { + /* Initialize clocks */ + r = amdgpu_atombios_get_clock_info(adev); + if (r) { + dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; + } + /* init i2c buses */ + if (!amdgpu_device_has_dc_support(adev)) + amdgpu_atombios_i2c_init(adev); } - } else { - /* Initialize clocks */ - r = amdgpu_atombios_get_clock_info(adev); - if (r) { - dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); - amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); - goto failed; - } - /* init i2c buses */ - if (!amdgpu_device_has_dc_support(adev)) - amdgpu_atombios_i2c_init(adev); } fence_driver_init: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 473eeac1f03b..d2d0d27f9053 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1696,7 +1696,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) uint32_t reserve_size = 0; int ret; - if (!amdgpu_sriov_vf(adev)) { + if (adev->bios && !amdgpu_sriov_vf(adev)) { if (amdgpu_atomfirmware_mem_training_supported(adev)) mem_train_support = true; else @@ -1713,7 +1713,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) if (adev->bios) reserve_size = amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); - if (!reserve_size) + + if (!adev->bios && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) + reserve_size = max(reserve_size, (uint32_t)280 << 20); + else if (!reserve_size) reserve_size = DISCOVERY_TMR_OFFSET; if (mem_train_support) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 1e8b2aaa48c1..be7823d82150 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -2010,34 
+2010,49 @@ static int gmc_v9_0_sw_init(void *handle) spin_lock_init(&adev->gmc.invalidate_lock); - r = amdgpu_atomfirmware_get_vram_info(adev, - &vram_width, &vram_type, &vram_vendor); - if (amdgpu_sriov_vf(adev)) - /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, - * and DF related registers is not readable, seems hardcord is the - * only way to set the correct vram_width - */ - adev->gmc.vram_width = 2048; - else if (amdgpu_emu_mode != 1) - adev->gmc.vram_width = vram_width; - - if (!adev->gmc.vram_width) { - int chansize, numchan; - - /* hbm memory channel size */ - if (adev->flags & AMD_IS_APU) - chansize = 64; - else - chansize = 128; - if (adev->df.funcs && - adev->df.funcs->get_hbm_channel_number) { - numchan = adev->df.funcs->get_hbm_channel_number(adev); - adev->gmc.vram_width = numchan * chansize; + if (!(adev->bios) || adev->gmc.is_app_apu) { + if (adev->flags & AMD_IS_APU) { + if (adev->gmc.is_app_apu) { + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; + adev->gmc.vram_width = 128 * 64; + } else { + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; + adev->gmc.vram_width = 64 * 64; + } + } else { + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; + adev->gmc.vram_width = 128 * 64; } - } + } else { + r = amdgpu_atomfirmware_get_vram_info(adev, + &vram_width, &vram_type, &vram_vendor); + if (amdgpu_sriov_vf(adev)) + /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, + * and DF related registers is not readable, seems hardcord is the + * only way to set the correct vram_width + */ + adev->gmc.vram_width = 2048; + else if (amdgpu_emu_mode != 1) + adev->gmc.vram_width = vram_width; - adev->gmc.vram_type = vram_type; - adev->gmc.vram_vendor = vram_vendor; + if (!adev->gmc.vram_width) { + int chansize, numchan; + + /* hbm memory channel size */ + if (adev->flags & AMD_IS_APU) + chansize = 64; + else + chansize = 128; + if (adev->df.funcs && + adev->df.funcs->get_hbm_channel_number) { + numchan = adev->df.funcs->get_hbm_channel_number(adev); + adev->gmc.vram_width = numchan * chansize; + } + } + + adev->gmc.vram_type = vram_type; + adev->gmc.vram_vendor = vram_vendor; + } switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 2): From 89f85765555caccec0a31b604639cea53942e522 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Tue, 16 May 2023 10:31:49 +0800 Subject: [PATCH 684/861] drm/amdgpu: golden settings for ASIC rev_id 0 Suggested by FW team that GB_ADDR_CONFIG is handled by golden settings in driver to get the expected value Signed-off-by: Shiwu Zhang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index bdaea50cafe7..35d359de0aad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L +#define GOLDEN_GB_ADDR_CONFIG 0x2a114042 + struct amdgpu_gfx_ras gfx_v9_4_3_ras; static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev); @@ -195,6 +197,15 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) dev_inst = GET_INST(GC, i); if (dev_inst >= 2) WREG32_SOC15(GC, dev_inst, regGRBM_MCM_ADDR, 0x4); + + /* Golden settings applied by driver for ASIC with rev_id 0 */ + if (adev->rev_id == 0) { + WREG32_SOC15(GC, dev_inst, 
regGB_ADDR_CONFIG, + GOLDEN_GB_ADDR_CONFIG); + + WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1, + REDUCE_FIFO_DEPTH_BY_2, 2); + } } } From 491ae27829cda38df3ab6d2fe5d94a80ec1bbe22 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Wed, 17 May 2023 13:40:04 +0800 Subject: [PATCH 685/861] drm/amdgpu: complement the 4, 6 and 8 XCC cases Signed-off-by: Shiwu Zhang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 35d359de0aad..ebdd7fa985d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1040,6 +1040,9 @@ static void gfx_v9_4_3_xcc_program_xcc_id(struct amdgpu_device *adev, WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, 0x8); break; case 2: + case 4: + case 6: + case 8: tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID); tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP)); WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, tmp); From 28bb7f13e70dcd3a6c736ee1567cf91c47af2600 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Wed, 24 May 2023 10:48:40 -0400 Subject: [PATCH 686/861] drm/jpeg: add init value for num_jpeg_rings Need init new num_jpeg_rings to 1 on jpeg. Signed-off-by: James Zhu Acked-by: Alex Deucher Reviewed-by: Richard Liang Tested-by: Ying Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 1 + drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 1 + drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 1 + drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 1 + drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 1 + 5 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 1c5b60604a19..77595e9622da 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -460,6 +460,7 @@ int jpeg_v1_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->jpeg.num_jpeg_inst = 1; + adev->jpeg.num_jpeg_rings = 1; jpeg_v1_0_set_dec_ring_funcs(adev); jpeg_v1_0_set_irq_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 3aeeceae34a5..c25d4a07350b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -49,6 +49,7 @@ static int jpeg_v2_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->jpeg.num_jpeg_inst = 1; + adev->jpeg.num_jpeg_rings = 1; jpeg_v2_0_set_dec_ring_funcs(adev); jpeg_v2_0_set_irq_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index f533ede484d4..aadb74de52bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -60,6 +60,7 @@ static int jpeg_v2_5_early_init(void *handle) u32 harvest; int i; + adev->jpeg.num_jpeg_rings = 1; adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS; for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index cb5494effc0f..79791379fc2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -64,6 +64,7 @@ static int jpeg_v3_0_early_init(void *handle) } 
adev->jpeg.num_jpeg_inst = 1; + adev->jpeg.num_jpeg_rings = 1; jpeg_v3_0_set_dec_ring_funcs(adev); jpeg_v3_0_set_irq_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 86383fbf9358..a707d407fbd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -58,6 +58,7 @@ static int jpeg_v4_0_early_init(void *handle) adev->jpeg.num_jpeg_inst = 1; + adev->jpeg.num_jpeg_rings = 1; jpeg_v4_0_set_dec_ring_funcs(adev); jpeg_v4_0_set_irq_funcs(adev); From 20997c04b7168b1833da77f882eb5a6f246c4b96 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Mon, 22 May 2023 17:11:59 +0800 Subject: [PATCH 687/861] drm/amdgpu: set the APU flag based on package type Currently the APU and dGPU share the same PCIe class, while gmc init needs the flag to be set up correctly for upcoming memory allocations. v2: call get_pkg_type in smuio 13_0_3 is enough (hawking) Signed-off-by: Shiwu Zhang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index a0685a5ebda6..859882109f55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -2500,6 +2500,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(13, 0, 3): adev->smuio.funcs = &smuio_v13_0_3_funcs; + if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) { + adev->flags |= AMD_IS_APU; + } break; case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 8): From 23105541727cd6b702c9ee66d98ba50a129fbd5e Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 25 Apr 2023 16:55:56 -0400 Subject: [PATCH 688/861] drm/amdgpu: save/restore part of xcp drm_device fields Redirect the xcp-allocated drm_device::rdev/pdev/driver fields with the amdgpu pci_device/drm_device settings. They need to be saved before the redirect and restored after the xcp drm_device is unregistered.
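As a minimal sketch of that pairing (field names follow the hunks below; the xcp entry and the p_ddev/ddev locals are assumed from the surrounding function):

	/* save the original pointers so unplug can restore them later */
	xcp->rdev = p_ddev->render->dev;
	xcp->pdev = p_ddev->primary->dev;
	xcp->driver = (struct drm_driver *)p_ddev->driver;
	/* then redirect the partition nodes to the primary amdgpu device */
	p_ddev->render->dev = ddev;
	p_ddev->primary->dev = ddev;
	p_ddev->driver = &amdgpu_partition_driver;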
-v2: fix warning discarded-qualifiers Signed-off-by: James Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 16 ++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 4 ++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index fcdc0862d258..86087faab689 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -240,9 +240,14 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev) return PTR_ERR(p_ddev); /* Redirect all IOCTLs to the primary device */ + adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev; + adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev; + adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver; + adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager; p_ddev->render->dev = ddev; p_ddev->primary->dev = ddev; p_ddev->vma_offset_manager = ddev->vma_offset_manager; + p_ddev->driver = &amdgpu_partition_driver; adev->xcp_mgr->xcp[i].ddev = p_ddev; } @@ -330,13 +335,20 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev, void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev) { + struct drm_device *p_ddev; int i; if (!adev->xcp_mgr) return; - for (i = 0; i < MAX_XCP; i++) - drm_dev_unplug(adev->xcp_mgr->xcp[i].ddev); + for (i = 0; i < MAX_XCP; i++) { + p_ddev = adev->xcp_mgr->xcp[i].ddev; + drm_dev_unplug(p_ddev); + p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev; + p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev; + p_ddev->driver = adev->xcp_mgr->xcp[i].driver; + p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager; + } } int amdgpu_xcp_open_device(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 9c5912b9d8bd..0f8026d64ea5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -77,6 +77,10 @@ struct amdgpu_xcp { bool valid; atomic_t ref_cnt; struct drm_device *ddev; + struct drm_device *rdev; + struct drm_device *pdev; + struct drm_driver *driver; + struct drm_vma_offset_manager *vma_offset_manager; struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; }; From 5d6cd20075c823565e7550f8de70d7615ec3c8b7 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Mon, 22 May 2023 15:58:10 +0800 Subject: [PATCH 689/861] drm/amdgpu: add the accelerator pcie class v2: add the base class id for accelerator (lijo) v3: add the new pci class in amdgpu tree (hawking) Signed-off-by: Shiwu Zhang Acked-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 5 +++++ drivers/gpu/drm/amd/include/amd_shared.h | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 7300df2a342c..422c36ed8f36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2041,6 +2041,11 @@ static const struct pci_device_id pciidlist[] = { .class_mask = 0xffffff, .driver_data = CHIP_IP_DISCOVERY }, + { PCI_DEVICE(0x1002, PCI_ANY_ID), + .class = AMD_ACCELERATOR_PROCESSING << 8, + .class_mask = 0xffffff, + .driver_data = CHIP_IP_DISCOVERY }, + {0, 0, 0} }; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f175e65b853a..57d95e2cc54b 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ 
b/drivers/gpu/drm/amd/include/amd_shared.h @@ -27,6 +27,7 @@ #define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */ +#define AMD_ACCELERATOR_PROCESSING 0x1200 /* hardcoded pci class */ /* * Chip flags From e0ac8656e75f1f26833c14313811043e36b0bba8 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:47 +0800 Subject: [PATCH 690/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn303/dcn303_resource.c:884:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn303/dcn303_resource.c:84:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../amd/display/dc/dcn303/dcn303_resource.c | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index fcd126602178..f35514188a5c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -81,23 +81,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_idle_power_optimizations = false, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -881,13 +864,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn303_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn303_hwseq_create, -}; - static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; From aab9b215d96e0c7e89109821f738f80d84270461 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:46 +0800 Subject: [PATCH 691/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn302/dcn302_resource.c:957:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn302/dcn302_resource.c:101:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../amd/display/dc/dcn302/dcn302_resource.c | 25 ------------------- 1 file changed, 25 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index efd98d64588d..93f42132c900 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -98,24 
+98,6 @@ static const struct dc_debug_options debug_defaults_drv = { .exit_idle_opt_for_cursor_updates = true }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -954,13 +936,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn302_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn302_hwseq_create, -}; - static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; From abc7e24275a35975e58e699ff2afdbcc47e124d6 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:49 +0800 Subject: [PATCH 692/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn315/dcn315_resource.c:1357:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn315/dcn315_resource.c:893:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../amd/display/dc/dcn315/dcn315_resource.c | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index e6d87c162d26..cb95e978417b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -890,24 +890,6 @@ static const struct dc_debug_options debug_defaults_drv = { .psr_power_use_phy_fsm = 0, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1354,15 +1336,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn31_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - static void dcn315_resource_destruct(struct dcn315_resource_pool *pool) { unsigned int i; From 
c0c2c51c40fef6960c11a3f132acf91878fa0de0 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Wed, 24 May 2023 16:57:08 +0800 Subject: [PATCH 693/861] drm/amd/display: clean up some inconsistent indenting No functional modification involved. drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn314/dcn314_fpu.c:269 dcn314_update_bw_bounding_box_fpu() warn: inconsistent indenting. Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5305 Signed-off-by: Jiapeng Chong Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 318b9c2bc9be..c9afddd11589 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -265,8 +265,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p } dcn20_patch_bounding_box(dc, &dcn3_14_soc); - - dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314); + dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314); } static bool is_dual_plane(enum surface_pixel_format format) From 2c4993bf88ef1e0ed6c81d2fb56f30f32c3d2e74 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:50 +0800 Subject: [PATCH 694/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn316/dcn316_resource.c:1355:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn316/dcn316_resource.c:899:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../amd/display/dc/dcn316/dcn316_resource.c | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index a3be61cc541f..b6e0aa2ab27a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -886,24 +886,6 @@ static const struct dc_debug_options debug_defaults_drv = { }, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1352,15 +1334,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn31_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = 
dcn31_hwseq_create, -}; - static void dcn316_resource_destruct(struct dcn316_resource_pool *pool) { unsigned int i; From 3a10a44a3e00d0227d13210ffeef50daa3a326bb Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Wed, 24 May 2023 16:57:09 +0800 Subject: [PATCH 695/861] drm/amd/display: clean up some inconsistent indenting No functional modification involved. drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn321/dcn321_fpu.c:556 dcn321_update_bw_bounding_box_fpu() warn: inconsistent indenting. Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5304 Signed-off-by: Jiapeng Chong Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../amd/display/dc/dml/dcn321/dcn321_fpu.c | 268 +++++++++--------- 1 file changed, 134 insertions(+), 134 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index ffd7c3c1b142..1aaff6f2d453 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -553,148 +553,148 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; /* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */ - if (dc->debug.use_legacy_soc_bb_mechanism) { - unsigned int i = 0, j = 0, num_states = 0; + if (dc->debug.use_legacy_soc_bb_mechanism) { + unsigned int i = 0, j = 0, num_states = 0; - unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; - unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; + unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; + unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; - unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {615, 906, 1324, 1564}; - unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0; - unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; + unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {615, 906, 1324, 1564}; + unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0; + unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - } - if (!max_dcfclk_mhz) - max_dcfclk_mhz = dcn3_21_soc.clock_limits[0].dcfclk_mhz; - if (!max_dispclk_mhz) - max_dispclk_mhz = dcn3_21_soc.clock_limits[0].dispclk_mhz; - if (!max_dppclk_mhz) - max_dppclk_mhz = dcn3_21_soc.clock_limits[0].dppclk_mhz; - if (!max_phyclk_mhz) - max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) + max_dcfclk_mhz = 
bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) + max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + } + if (!max_dcfclk_mhz) + max_dcfclk_mhz = dcn3_21_soc.clock_limits[0].dcfclk_mhz; + if (!max_dispclk_mhz) + max_dispclk_mhz = dcn3_21_soc.clock_limits[0].dispclk_mhz; + if (!max_dppclk_mhz) + max_dppclk_mhz = dcn3_21_soc.clock_limits[0].dppclk_mhz; + if (!max_phyclk_mhz) + max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; - if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array - dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; - num_dcfclk_sta_targets++; - } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates - for (i = 0; i < num_dcfclk_sta_targets; i++) { - if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { - dcfclk_sta_targets[i] = max_dcfclk_mhz; - break; - } - } - // Update size of array since we "removed" duplicates - num_dcfclk_sta_targets = i + 1; - } - - num_uclk_states = bw_params->clk_table.num_entries; - - // Calculate optimal dcfclk for each uclk - for (i = 0; i < num_uclk_states; i++) { - dcn321_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, - &optimal_dcfclk_for_uclk[i], NULL); - if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) { - optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; - } - } - - // Calculate optimal uclk for each dcfclk sta target + if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array + dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; + num_dcfclk_sta_targets++; + } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates for (i = 0; i < num_dcfclk_sta_targets; i++) { - for (j = 0; j < num_uclk_states; j++) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { - optimal_uclk_for_dcfclk_sta_targets[i] = - bw_params->clk_table.entries[j].memclk_mhz * 16; - break; - } + if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { + dcfclk_sta_targets[i] = max_dcfclk_mhz; + break; } } - - i = 0; - j = 0; - // create the final dcfclk and uclk table - while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; - } else { - if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; - } else { - j = num_uclk_states; - } - } - } - - while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = 
optimal_uclk_for_dcfclk_sta_targets[i++]; - } - - while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && - optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; - } - - dcn3_21_soc.num_states = num_states; - for (i = 0; i < dcn3_21_soc.num_states; i++) { - dcn3_21_soc.clock_limits[i].state = i; - dcn3_21_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; - dcn3_21_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; - - /* Fill all states with max values of all these clocks */ - dcn3_21_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; - dcn3_21_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; - dcn3_21_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; - dcn3_21_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3; - - /* Populate from bw_params for DTBCLK, SOCCLK */ - if (i > 0) { - if (!bw_params->clk_table.entries[i].dtbclk_mhz) { - dcn3_21_soc.clock_limits[i].dtbclk_mhz = dcn3_21_soc.clock_limits[i-1].dtbclk_mhz; - } else { - dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; - } - } else if (bw_params->clk_table.entries[i].dtbclk_mhz) { - dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; - } - - if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) - dcn3_21_soc.clock_limits[i].socclk_mhz = dcn3_21_soc.clock_limits[i-1].socclk_mhz; - else - dcn3_21_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; - - if (!dram_speed_mts[i] && i > 0) - dcn3_21_soc.clock_limits[i].dram_speed_mts = dcn3_21_soc.clock_limits[i-1].dram_speed_mts; - else - dcn3_21_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; - - /* These clocks cannot come from bw_params, always fill from dcn3_21_soc[0] */ - /* PHYCLK_D18, PHYCLK_D32 */ - dcn3_21_soc.clock_limits[i].phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz; - dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; - } - } else { - build_synthetic_soc_states(bw_params, dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states); + // Update size of array since we "removed" duplicates + num_dcfclk_sta_targets = i + 1; } - /* Re-init DML with updated bb */ - dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); - if (dc->current_state) - dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); + num_uclk_states = bw_params->clk_table.num_entries; + + // Calculate optimal dcfclk for each uclk + for (i = 0; i < num_uclk_states; i++) { + dcn321_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, + &optimal_dcfclk_for_uclk[i], NULL); + if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) { + optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; + } + } + + // Calculate optimal uclk for each dcfclk sta target + for (i = 0; i < num_dcfclk_sta_targets; i++) { + for (j = 0; j < num_uclk_states; j++) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + break; + } + } + } + + i = 0; + j = 0; + // create the final dcfclk and uclk table + while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + dcfclk_mhz[num_states] = 
dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } else { + if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } else { + j = num_uclk_states; + } + } + } + + while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } + + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && + optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + + dcn3_21_soc.num_states = num_states; + for (i = 0; i < dcn3_21_soc.num_states; i++) { + dcn3_21_soc.clock_limits[i].state = i; + dcn3_21_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; + dcn3_21_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; + + /* Fill all states with max values of all these clocks */ + dcn3_21_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; + dcn3_21_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; + dcn3_21_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; + dcn3_21_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3; + + /* Populate from bw_params for DTBCLK, SOCCLK */ + if (i > 0) { + if (!bw_params->clk_table.entries[i].dtbclk_mhz) { + dcn3_21_soc.clock_limits[i].dtbclk_mhz = dcn3_21_soc.clock_limits[i-1].dtbclk_mhz; + } else { + dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + } + } else if (bw_params->clk_table.entries[i].dtbclk_mhz) { + dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + } + + if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) + dcn3_21_soc.clock_limits[i].socclk_mhz = dcn3_21_soc.clock_limits[i-1].socclk_mhz; + else + dcn3_21_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; + + if (!dram_speed_mts[i] && i > 0) + dcn3_21_soc.clock_limits[i].dram_speed_mts = dcn3_21_soc.clock_limits[i-1].dram_speed_mts; + else + dcn3_21_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; + + /* These clocks cannot come from bw_params, always fill from dcn3_21_soc[0] */ + /* PHYCLK_D18, PHYCLK_D32 */ + dcn3_21_soc.clock_limits[i].phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz; + dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; + } + } else { + build_synthetic_soc_states(bw_params, dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states); + } + + /* Re-init DML with updated bb */ + dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); + if (dc->current_state) + dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); } From 70a6267753c1f755157e3bfb63d8fce4137729c6 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:51 +0800 Subject: [PATCH 696/861] drm/amd/display: remove unused definition Eliminate the following warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn321/dcn321_resource.c:1346:43: warning: unused variable 'res_create_maximus_funcs' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn321/dcn321_resource.c:735:38: warning: unused variable 'debug_defaults_diags' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA 
code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../amd/display/dc/dcn321/dcn321_resource.c | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index af0bb3e94250..bbcd3579fea6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -732,27 +732,6 @@ static const struct dc_debug_options debug_defaults_drv = { .fpo_vactive_max_blank_us = 1000, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_dsc_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true, - .force_disable_subvp = true, -}; - - static struct dce_aux *dcn321_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1343,15 +1322,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn321_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn321_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn321_hpo_dp_link_encoder_create, - .create_hwseq = dcn321_hwseq_create, -}; - static void dcn321_resource_destruct(struct dcn321_resource_pool *pool) { unsigned int i; From 3034983db355daefc4463defce802b8e6d86539f Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 19 May 2023 18:10:40 +0530 Subject: [PATCH 697/861] drm/amdgpu: Mark mmhub_v1_8_mmea_err_status_reg as __maybe_unused MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Silencing the compiler from below compilation error: drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c:704:23: error: variable 'mmhub_v1_8_mmea_err_status_reg' is not needed and will not be emitted [-Werror,-Wunneeded-internal-declaration] static const uint32_t mmhub_v1_8_mmea_err_status_reg[] = { ^ 1 error generated. Mark the variable as __maybe_unused to make it clear to clang that this is expected, so there is no more warning. 
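For illustration only, __maybe_unused marks a symbol that is legitimately unreferenced in some configurations; the array name and values below are hypothetical, not from the driver:

	/* only referenced under certain configs; keep clang quiet otherwise */
	static const u32 example_err_status_regs[] __maybe_unused = {
		0x0, 0x4, 0x8,
	};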
Cc: Christian König Cc: Lijo Lazar Cc: Luben Tuikov Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Nathan Chancellor Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index bf3b0f382c89..5e8b493f8699 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -701,7 +701,7 @@ static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev) mmhub_v1_8_inst_reset_ras_error_count(adev, i); } -static const uint32_t mmhub_v1_8_mmea_err_status_reg[] = { +static const u32 mmhub_v1_8_mmea_err_status_reg[] __maybe_unused = { regMMEA0_ERR_STATUS, regMMEA1_ERR_STATUS, regMMEA2_ERR_STATUS, From 3b60b70dbec9bb2450ecf012a7b8b6e5dce7168d Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 25 Apr 2023 17:00:50 -0400 Subject: [PATCH 698/861] drm/amdxcp: add platform device driver for amdxcp Add platform device driver for amdxcp to support amdgpu spatial partition. -v2: fix build warning Signed-off-by: James Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/amd/amdxcp/Makefile | 25 +++++ drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c | 110 ++++++++++++++++++++ drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h | 29 ++++++ 4 files changed, 165 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdxcp/Makefile create mode 100644 drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c create mode 100644 drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index a33257d2bc7f..b119089c312f 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -140,6 +140,7 @@ obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_SCHED) += scheduler/ obj-$(CONFIG_DRM_RADEON)+= radeon/ obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/ +obj-$(CONFIG_DRM_AMDGPU)+= amd/amdxcp/ obj-$(CONFIG_DRM_I915) += i915/ obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/ obj-$(CONFIG_DRM_MGAG200) += mgag200/ diff --git a/drivers/gpu/drm/amd/amdxcp/Makefile b/drivers/gpu/drm/amd/amdxcp/Makefile new file mode 100644 index 000000000000..5e1bd70748d4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdxcp/Makefile @@ -0,0 +1,25 @@ +# +# Copyright 2023 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+# + +amdgpu-y := amdgpu_xcp_drv.o + +obj-$(CONFIG_DRM_AMDGPU) += amdgpu_xcp_drv.o diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c new file mode 100644 index 000000000000..353597fc908d --- /dev/null +++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c @@ -0,0 +1,110 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include +#include + +#include + +#include "amdgpu_xcp_drv.h" + +#define MAX_XCP_PLATFORM_DEVICE 64 + +struct xcp_device { + struct drm_device drm; + struct platform_device *pdev; +}; + +static const struct drm_driver amdgpu_xcp_driver = { + .driver_features = DRIVER_GEM | DRIVER_RENDER, + .name = "amdgpu_xcp_drv", + .major = 1, + .minor = 0, +}; + +static int pdev_num; +static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE]; + +int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev) +{ + struct platform_device *pdev; + struct xcp_device *pxcp_dev; + int ret; + + if (pdev_num >= MAX_XCP_PLATFORM_DEVICE) + return -ENODEV; + + pdev = platform_device_register_simple("amdgpu_xcp", pdev_num, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out_unregister; + } + + pxcp_dev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_xcp_driver, struct xcp_device, drm); + if (IS_ERR(pxcp_dev)) { + ret = PTR_ERR(pxcp_dev); + goto out_devres; + } + + xcp_dev[pdev_num] = pxcp_dev; + xcp_dev[pdev_num]->pdev = pdev; + *ddev = &pxcp_dev->drm; + pdev_num++; + + return 0; + +out_devres: + devres_release_group(&pdev->dev, NULL); +out_unregister: + platform_device_unregister(pdev); + + return ret; +} +EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc); + +void amdgpu_xcp_drv_release(void) +{ + for (--pdev_num; pdev_num >= 0; --pdev_num) { + devres_release_group(&xcp_dev[pdev_num]->pdev->dev, NULL); + platform_device_unregister(xcp_dev[pdev_num]->pdev); + xcp_dev[pdev_num]->pdev = NULL; + xcp_dev[pdev_num] = NULL; + } + pdev_num = 0; +} +EXPORT_SYMBOL(amdgpu_xcp_drv_release); + +static void __exit amdgpu_xcp_drv_exit(void) +{ + amdgpu_xcp_drv_release(); +} + +module_exit(amdgpu_xcp_drv_exit); + +MODULE_AUTHOR("AMD linux driver team"); +MODULE_DESCRIPTION("AMD XCP PLATFORM DEVICES"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h new file mode 100644 index 000000000000..c1c4b679bf95 
--- /dev/null +++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _AMDGPU_XCP_DRV_H_ +#define _AMDGPU_XCP_DRV_H_ + +int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev); +void amdgpu_xcp_drv_release(void); +#endif /* _AMDGPU_XCP_DRV_H_ */ From ab1270a29b4f2b23aaa28d590d8361903c68b770 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:48 +0800 Subject: [PATCH 699/861] drm/amd/display: remove unused definition Eliminate the following warning: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn314/dcn314_resource.c:1390:43: warning: unused variable 'res_create_maximus_funcs' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 2483d37e425d..3592efcc7fae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -1387,15 +1387,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn314_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn314_hwseq_create, -}; - static void dcn314_resource_destruct(struct dcn314_resource_pool *pool) { unsigned int i; From 9938333a46c9e20539c85ca7df42a739541b0493 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 25 Apr 2023 17:02:48 -0400 Subject: [PATCH 700/861] drm/amdgpu: use amdxcp platform device as spatial partition Use amdxcp platform device as spatial partition device. 
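A rough usage sketch of the helper introduced by the amdxcp driver above (error handling trimmed; the caller context is assumed):

	struct drm_device *p_ddev;
	int ret;

	/* allocate a partition drm_device backed by an amdxcp platform device */
	ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
	if (ret)
		return ret;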
-v2: remove unused variable Signed-off-by: James Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 12 +++++------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 422c36ed8f36..7489b2b1a0d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -50,6 +50,7 @@ #include "amdgpu_ras.h" #include "amdgpu_xgmi.h" #include "amdgpu_reset.h" +#include "../amdxcp/amdgpu_xcp_drv.h" /* * KMS wrapper. @@ -2948,6 +2949,7 @@ static void __exit amdgpu_exit(void) amdgpu_sync_fini(); amdgpu_fence_slab_fini(); mmu_notifier_synchronize(); + amdgpu_xcp_drv_release(); } module_init(amdgpu_init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 86087faab689..d733fa6e7477 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -25,6 +25,7 @@ #include "amdgpu_drv.h" #include +#include "../amdxcp/amdgpu_xcp_drv.h" static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr, struct amdgpu_xcp_ip *xcp_ip, int xcp_state) @@ -226,18 +227,15 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev) { struct drm_device *p_ddev; - struct pci_dev *pdev; struct drm_device *ddev; - int i; + int i, ret; - pdev = adev->pdev; ddev = adev_to_drm(adev); for (i = 0; i < MAX_XCP; i++) { - p_ddev = drm_dev_alloc(&amdgpu_partition_driver, - &pci_upstream_bridge(pdev)->dev); - if (IS_ERR(p_ddev)) - return PTR_ERR(p_ddev); + ret = amdgpu_xcp_drm_dev_alloc(&p_ddev); + if (ret) + return ret; /* Redirect all IOCTLs to the primary device */ adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev; From 321488d180c2f5c1811a0ba7b18d18c7af87739b Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 24 May 2023 11:59:52 +0800 Subject: [PATCH 701/861] drm/amd/display: remove unused definition Eliminate the following warning: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn201/dcn201_resource.c:899:43: warning: unused variable 'res_create_maximus_funcs' Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5296 Fixes: 25879d7b4986 ("drm/amd/display: Clean FPGA code in dc") Signed-off-by: Yang Li Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 212c475d95cb..5ff09bf4bc30 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -896,13 +896,6 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dcn201_hwseq_create, }; -static const struct resource_create_funcs res_create_maximus_funcs = { - .read_dce_straps = NULL, - .create_audio = NULL, - .create_stream_encoder = NULL, - .create_hwseq = dcn201_hwseq_create, -}; - static void dcn201_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); From 9c9d501b28a00f4365632260df6cae488a905af7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 25 May 2023 11:04:51 +0300 Subject: [PATCH 702/861] drm/amd/amdgpu: Fix up locking etc in amdgpu_debugfs_gprwave_ioctl() There are two bugs here. 
1) Drop the lock if copy_from_user() fails. 2) If the copy fails then the correct error code is -EFAULT instead of -EINVAL. I also broke up the long line and changed "sizeof rd->id" to "sizeof(rd->id)". Fixes: 553f973a0d7b ("drm/amd/amdgpu: Update debugfs for XCC support (v3)") Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index c657bed350ac..56e89e76ff17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -478,15 +478,16 @@ done: static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data) { struct amdgpu_debugfs_gprwave_data *rd = f->private_data; - int r; + int r = 0; mutex_lock(&rd->lock); switch (cmd) { case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE: - r = copy_from_user(&rd->id, (struct amdgpu_debugfs_gprwave_iocdata *)data, sizeof rd->id); - if (r) - return r ? -EINVAL : 0; + if (copy_from_user(&rd->id, + (struct amdgpu_debugfs_gprwave_iocdata *)data, + sizeof(rd->id))) + r = -EFAULT; goto done; default: r = -EINVAL; From 8ffd6f0442674f32c048ec8dffdbc5ec67829beb Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Thu, 25 May 2023 17:24:31 +0800 Subject: [PATCH 703/861] drm/amdgpu: keep irq count in amdgpu_irq_disable_all MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This can clean up all irq warnings because of unbalanced amdgpu_irq_get/put when unplugging/unbinding device, and leave irq count decrease in each ip fini function. Signed-off-by: Guchun Chen Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 5667053c295c..3481d2808ce5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -139,7 +139,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev) continue; for (k = 0; k < src->num_types; ++k) { - atomic_set(&src->enabled_types[k], 0); r = src->funcs->set(adev, src, k, AMDGPU_IRQ_STATE_DISABLE); if (r) From 28ebbb4981cb1fad12e0b1227dbecc88810b1ee8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 24 May 2023 14:30:12 -0400 Subject: [PATCH 704/861] drm/amdkfd: fix gfx_target_version for certain 11.0.3 devices Certain boards with GC IP 11.0.3 need slightly different handling in the shader compiler due to board specific bounding box optimizations. 
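The gfx_target_version values used in this and the later GC 9.4.3 patch appear to pack the major.minor.stepping digits in decimal; a sketch of that inferred encoding (helper name is hypothetical):

    /* Inferred from the values used in this series:
     * 11.0.1 -> 110001, 11.0.5 -> 110005, 9.4.2 -> 90402. */
    static unsigned int example_gfx_target_version(unsigned int major,
                                                   unsigned int minor,
                                                   unsigned int stepping)
    {
            return major * 10000 + minor * 100 + stepping;
    }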
Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 862a50f7b490..ebc3c3f965f9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -411,8 +411,15 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) f2g = &gfx_v11_kfd2kgd; break; case IP_VERSION(11, 0, 3): - /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */ - gfx_target_version = 110001; + if ((adev->pdev->device == 0x7460 && + adev->pdev->revision == 0x00) || + (adev->pdev->device == 0x7461 && + adev->pdev->revision == 0x00)) + /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */ + gfx_target_version = 110005; + else + /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */ + gfx_target_version = 110001; f2g = &gfx_v11_kfd2kgd; break; default: From 3a25071a970885a2bd3f63cfc7c729e0d536e10f Mon Sep 17 00:00:00 2001 From: Ikshwaku Chauhan Date: Thu, 25 May 2023 10:57:26 +0530 Subject: [PATCH 705/861] drm/amdgpu: enable tmz by default for GC 11.0.1 Add IP GC 11.0.1 to the list of targets that have tmz enabled by default. Signed-off-by: Ikshwaku Chauhan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index d12625f1de5a..d78bd9732543 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -592,6 +592,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(9, 3, 0): /* GC 10.3.7 */ case IP_VERSION(10, 3, 7): + /* GC 11.0.1 */ + case IP_VERSION(11, 0, 1): if (amdgpu_tmz == 0) { adev->gmc.tmz_enabled = false; dev_info(adev->dev, @@ -615,7 +617,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(10, 3, 1): /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): - case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): /* Don't enable it by default yet. 
*/ From 23616d1ff31d6e8ffd4f1e12b6b1e2e783fa8280 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Thu, 25 May 2023 22:32:35 +0530 Subject: [PATCH 706/861] drm/amdgpu: Fix up kdoc in sdma_v4_4_2.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Address a bunch of kdoc warnings: gcc with W=1 drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:426: warning: Function parameter or member 'inst_mask' not described in 'sdma_v4_4_2_inst_gfx_stop' drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:457: warning: Function parameter or member 'inst_mask' not described in 'sdma_v4_4_2_inst_rlc_stop' drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:470: warning: Function parameter or member 'inst_mask' not described in 'sdma_v4_4_2_inst_page_stop' drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:506: warning: Function parameter or member 'inst_mask' not described in 'sdma_v4_4_2_inst_ctx_switch_enable' drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:794: warning: Function parameter or member 'inst_mask' not described in 'sdma_v4_4_2_inst_rlc_resume' drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:810: warning: Function parameter or member 'inst_mask' not described in 'sdma_v4_4_2_inst_load_microcode' drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c:854: warning: Function parameter or member 'inst_mask' not described in 'sdma_v4_4_2_inst_start' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index ff41fb577cdd..8eebf9c2bbcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -418,6 +418,7 @@ static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 * sdma_v4_4_2_inst_gfx_stop - stop the gfx async dma engines * * @adev: amdgpu_device pointer + * @inst_mask: mask of dma engine instances to be disabled * * Stop the gfx async dma ring buffers. */ @@ -449,6 +450,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, * sdma_v4_4_2_inst_rlc_stop - stop the compute async dma engines * * @adev: amdgpu_device pointer + * @inst_mask: mask of dma engine instances to be disabled * * Stop the compute async dma queues. */ @@ -462,6 +464,7 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev, * sdma_v4_4_2_inst_page_stop - stop the page async dma engines * * @adev: amdgpu_device pointer + * @inst_mask: mask of dma engine instances to be disabled * * Stop the page async dma ring buffers. */ @@ -498,6 +501,7 @@ static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs context switch. + * @inst_mask: mask of dma engine instances to be enabled * * Halt or unhalt the async dma engines context switch. */ @@ -785,6 +789,7 @@ static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev) * sdma_v4_4_2_inst_rlc_resume - setup and start the async dma engines * * @adev: amdgpu_device pointer + * @inst_mask: mask of dma engine instances to be enabled * * Set up the compute DMA queues and enable them. * Returns 0 for success, error for failure. 
@@ -801,6 +806,7 @@ static int sdma_v4_4_2_inst_rlc_resume(struct amdgpu_device *adev, * sdma_v4_4_2_inst_load_microcode - load the sDMA ME ucode * * @adev: amdgpu_device pointer + * @inst_mask: mask of dma engine instances to be enabled * * Loads the sDMA0/1 ucode. * Returns 0 for success, -EINVAL if the ucode is not available. @@ -845,6 +851,7 @@ static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev, * sdma_v4_4_2_inst_start - setup and start the async dma engines * * @adev: amdgpu_device pointer + * @inst_mask: mask of dma engine instances to be enabled * * Set up the DMA engines and enable them. * Returns 0 for success, error for failure. From 66dadf1ab196fd2cf8c41f07a4745ad7fb84726e Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Thu, 25 May 2023 23:30:59 +0530 Subject: [PATCH 707/861] drm/amdgpu: Fix up kdoc in amdgpu_acpi.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these warnings by adding & deleting the deviant arguments. gcc with W=1 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c:906: warning: Function parameter or member 'numa_info' not described in 'amdgpu_acpi_get_node_id' drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c:906: warning: Excess function parameter 'nid' description in 'amdgpu_acpi_get_node_id' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 3fb2c3af0998..be881f21b340 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -894,7 +894,7 @@ static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm) * acpi device handle * * @handle: acpi handle - * @nid: NUMA Node id returned by the platform firmware + * @numa_info: amdgpu_numa_info structure holding numa information * * Queries the ACPI interface to fetch the corresponding NUMA Node ID for a * given amdgpu acpi device. 
From 9eba1b8b70f6488e944fdd1928ef758917bf7229 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Thu, 25 May 2023 21:37:35 +0530 Subject: [PATCH 708/861] drm/amdgpu: Fix up missing kdoc in sdma_v6_0.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Address a bunch of kdoc warnings: gcc with W=1 drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c:248: warning: Function parameter or member 'job' not described in 'sdma_v6_0_ring_emit_ib' drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c:248: warning: Function parameter or member 'flags' not described in 'sdma_v6_0_ring_emit_ib' drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c:946: warning: Function parameter or member 'timeout' not described in 'sdma_v6_0_ring_test_ib' drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c:1125: warning: Function parameter or member 'ring' not described in 'sdma_v6_0_ring_pad_ib' drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c:1176: warning: Function parameter or member 'vmid' not described in 'sdma_v6_0_ring_emit_vm_flush' drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c:1176: warning: Function parameter or member 'pd_addr' not described in 'sdma_v6_0_ring_emit_vm_flush' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 1c90b5c661fb..967849c59ebe 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -238,6 +238,8 @@ static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * * @ring: amdgpu ring pointer * @ib: IB object to schedule + * @flags: unused + * @job: job to retrieve vmid from * * Schedule an IB in the DMA ring. */ @@ -938,6 +940,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring) * sdma_v6_0_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test a simple IB in the DMA ring. * Returns 0 on success, error on failure. @@ -1167,6 +1170,8 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA * * @ring: amdgpu_ring pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using sDMA. From 07a1475279244cd8eea81bec44fd5f0a9d6871f8 Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Mon, 3 Apr 2023 15:31:53 -0400 Subject: [PATCH 709/861] drm/amdkfd: Add new gfx_target_versions for GC 9.4.3 For GC 9.4.3, set gfx_target_version to 90402 for rev 1 and later (APU or dGPU), 90401 for rev 0 dGPU, and 90400 for rev 0 APU. Signed-off-by: Graham Sider Acked-by: Alex Deucher Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index ebc3c3f965f9..e84ad1c5ef44 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -332,7 +332,9 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) f2g = &aldebaran_kfd2kgd; break; case IP_VERSION(9, 4, 3): - gfx_target_version = 90400; + gfx_target_version = adev->rev_id >= 1 ? 90402 + : adev->flags & AMD_IS_APU ? 
90400 + : 90401; f2g = &gc_9_4_3_kfd2kgd; break; /* Navi10 */ From b695c97b580a1949d0dd96aa17b01d4de738eda3 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 29 May 2023 19:18:54 +0530 Subject: [PATCH 710/861] drm/amdkfd: Fix MEC pipe interrupt enablement for_each_inst modifies xcc_mask and therefore the loop doesn't properly initialize interrupts on all pipes. Keep looping through xcc as the outer loop to fix this issue. Fixes: c4050ff1a43e ("drm/amdkfd: Use xcc mask for identifying xcc") Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 493b4b66f180..80cddb46657f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1274,11 +1274,12 @@ static void init_interrupts(struct device_queue_manager *dqm) uint32_t xcc_mask = dqm->dev->xcc_mask; unsigned int i, xcc_id; - for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { - if (is_pipe_enabled(dqm, 0, i)) { - for_each_inst(xcc_id, xcc_mask) + for_each_inst(xcc_id, xcc_mask) { + for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { + if (is_pipe_enabled(dqm, 0, i)) { dqm->dev->kfd2kgd->init_interrupts( dqm->dev->adev, i, xcc_id); + } } } } From 837d4e071d250d695eba7a08c55c77f6a5b4bb5e Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Sat, 27 May 2023 22:24:53 +0530 Subject: [PATCH 711/861] drm/amdgpu: Fix create_dmamap_sg_bo kdoc warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following gcc with W=1: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:292: warning: Cannot understand * @create_dmamap_sg_bo: Creates a amdgpu_bo object to reflect information Cc: Felix Kuehling Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index cc37f04651e2..23390b5932fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -290,8 +290,9 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) } /** - * @create_dmamap_sg_bo: Creates a amdgpu_bo object to reflect information + * create_dmamap_sg_bo() - Creates a amdgpu_bo object to reflect information * about USERPTR or DOOREBELL or MMIO BO. + * * @adev: Device for which dmamap BO is being created * @mem: BO of peer device that is being DMA mapped. 
Provides parameters * in building the dmamap BO From 21d81681c298e9712fe37df4b001e8476fc7d03b Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Sat, 27 May 2023 19:45:52 +0530 Subject: [PATCH 712/861] drm/amd/display: Add missing kdoc entries in update_planes_and_stream_adapter Fixes the following gcc with W=1: drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:374: warning: Function parameter or member 'dc' not described in 'update_planes_and_stream_adapter' drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:374: warning: Function parameter or member 'update_type' not described in 'update_planes_and_stream_adapter' drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:374: warning: Function parameter or member 'planes_count' not described in 'update_planes_and_stream_adapter' drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:374: warning: Function parameter or member 'stream' not described in 'update_planes_and_stream_adapter' drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:374: warning: Function parameter or member 'stream_update' not described in 'update_planes_and_stream_adapter' drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:374: warning: Function parameter or member 'array_of_surface_update' not described in 'update_planes_and_stream_adapter' Cc: Rodrigo Siqueira Signed-off-by: Srinivasan Shanmugam Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c98c57deb5eb..d0532d7e8341 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -365,6 +365,14 @@ static inline void reverse_planes_order(struct dc_surface_update *array_of_surfa * adjustments and preparation before calling it. This function is a wrapper * for the dc_update_planes_and_stream that does any required configuration * before passing control to DC. 
+ * + * @dc: Display Core control structure + * @update_type: specify whether it is FULL/MEDIUM/FAST update + * @planes_count: planes count to update + * @stream: stream state + * @stream_update: stream update + * @array_of_surface_update: dc surface update pointer + * */ static inline bool update_planes_and_stream_adapter(struct dc *dc, int update_type, From 0be5ccd518031be41266ef952db2202900d519cc Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Sat, 27 May 2023 22:05:40 +0530 Subject: [PATCH 713/861] drm/amd/display: Fix up kdoc formats in dcn32_fpu.c Fixes the following gcc with W=1: drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2806: warning: Cannot understand * ************************************************************************* drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2855: warning: Cannot understand * ************************************************************************* drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2900: warning: Function parameter or member 'dc' not described in 'dcn32_assign_fpo_vactive_candidate' drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2900: warning: Function parameter or member 'context' not described in 'dcn32_assign_fpo_vactive_candidate' drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2900: warning: Function parameter or member 'fpo_candidate_stream' not described in 'dcn32_assign_fpo_vactive_candidate' drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2929: warning: Function parameter or member 'dc' not described in 'dcn32_find_vactive_pipe' drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2929: warning: Function parameter or member 'context' not described in 'dcn32_find_vactive_pipe' drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/dcn32_fpu.c:2929: warning: Function parameter or member 'vactive_margin_req_us' not described in 'dcn32_find_vactive_pipe' Cc: Rodrigo Siqueira Cc: Harry Wentland Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 33 ++++++++----------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 137ff970c9aa..7e03c844b05e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2803,12 +2803,11 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) } /** - * ************************************************************************************************ * dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp * - * @param [in]: dc: Current DC state - * @param [in]: context: New DC state to be programmed - * @param [in]: pipe: Pipe to be considered for use in subvp + * @dc: Current DC state + * @context: New DC state to be programmed + * @pipe: Pipe to be considered for use in subvp * * On high refresh rate display configs, we will allow subvp under the following conditions: * 1. Resolution is 3840x2160, 3440x1440, or 2560x1440 @@ -2817,9 +2816,7 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) * 4. Freesync is inactive * 5. 
For single display cases, freesync must be disabled * - * @return: True if pipe can be used for subvp, false otherwise - * - * ************************************************************************************************ + * Return: True if pipe can be used for subvp, false otherwise */ bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe) { @@ -2852,15 +2849,12 @@ bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context } /** - * ******************************************************************************************* * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy * - * @param [in]: dc: Current DC state - * @param [in]: context: New DC state to be programmed + * @dc: Current DC state + * @context: New DC state to be programmed * - * @return: Max vratio for prefetch - * - * ******************************************************************************************* + * Return: Max vratio for prefetch */ double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context) { @@ -2890,9 +2884,9 @@ double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *conte * ActiveMargin <= 0 to be the FPO stream candidate if found. * * - * @param [in]: dc - current dc state - * @param [in]: context - new dc state - * @param [out]: fpo_candidate_stream - pointer to FPO stream candidate if one is found + * @dc: current dc state + * @context: new dc state + * @fpo_candidate_stream: pointer to FPO stream candidate if one is found * * Return: void */ @@ -2918,10 +2912,9 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co /** * dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE * - * @param [in]: dc - current dc state - * @param [in]: context - new dc state - * @param [in]: vactive_margin_req_us - The vactive marign required for a vactive pipe to be - * considered "found" + * @dc: current dc state + * @context: new dc state + * @vactive_margin_req_us: The vactive marign required for a vactive pipe to be considered "found" * * Return: True if VACTIVE display is found, false otherwise */ From c6a64ad9b7f7182b5e2439a740574300b2e61951 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 30 May 2023 11:52:45 +0530 Subject: [PATCH 714/861] drm/amdgpu: Initialize xcc mask For ASICs which are not initialized through discovery, initialize GFX cluster as 1. 
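For context, a simplified sketch (not code from this patch) of what a one-bit xcc_mask implies for per-instance iteration:

    /* With xcc_mask == 1, walking the mask by lowest set bit visits
     * exactly one GFX compute cluster, instance 0, which matches these
     * legacy single-XCC ASICs. */
    uint32_t mask = adev->gfx.xcc_mask;
    int xcc_id;

    while (mask) {
            xcc_id = ffs(mask) - 1; /* index of lowest set bit */
            mask &= mask - 1;       /* clear that bit */
            /* ... program GFX instance xcc_id ... */
    }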
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 809558c718e3..da6caff78c22 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3028,6 +3028,7 @@ static int gfx_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), GFX6_NUM_COMPUTE_RINGS); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0f0c12bbe228..20fcd86a3e79 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4179,6 +4179,7 @@ static int gfx_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 2f1ef75e126c..51c1745c8369 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5262,6 +5262,7 @@ static int gfx_v8_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); From c22b044070971e474dd0ff81a9830df93751f726 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Mon, 29 May 2023 16:01:37 -0500 Subject: [PATCH 715/861] drm/amdkfd: flag added to handle errors from svm validate and map If a return error is raised during validation and mapping of a prange, this flag is set. It is a rare occurrence, but it could happen when `amdgpu_hmm_range_get_pages_done` returns true. In such cases, the caller should retry. However, it is important to ensure that the prange is updated correctly during the retry. 
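A minimal sketch of the intended caller-side pattern (the wrapper and retry budget below are hypothetical; the real flow lives in svm_range_validate_and_map() and its callers):

    /* A failed attempt sets prange->is_error_flag, which makes the
     * "attributes unchanged" fast path report false, so the retry
     * re-runs the full validate-and-map instead of reusing stale state. */
    static int example_validate_with_retry(struct svm_range *prange)
    {
            int r, tries = 3; /* hypothetical retry budget */

            do {
                    r = example_validate_and_map(prange); /* hypothetical wrapper */
            } while (r && --tries);

            return r;
    }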
Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index ee16130ddc75..9c88d6e90c3b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -809,7 +809,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange, } } - return true; + return !prange->is_error_flag; } /** @@ -1691,6 +1691,7 @@ unlock_out: unreserve_out: svm_range_unreserve_bos(ctx); + prange->is_error_flag = !!r; if (!r) prange->validate_timestamp = ktime_get_boottime(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 762679835e31..21b14510882b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -134,6 +134,7 @@ struct svm_range { DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE); bool validated_once; bool mapped_to_gpu; + bool is_error_flag; }; static inline void svm_range_lock(struct svm_range *prange) From ca2943fe0acecfc89937dcf0abef2d7c1bccf9f4 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Mon, 29 May 2023 19:29:35 +0530 Subject: [PATCH 716/861] drm/amdgpu: Fix missing parameter desc for 'xcc_id' in gfx_v7_0.c & amdgpu_rlc.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these warnings by adding 'xcc_id' arguments. gcc with W=1 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c:1557: warning: Function parameter or member 'xcc_id' not described in 'gfx_v7_0_select_se_sh' drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c:38: warning: Function parameter or member 'xcc_id' not described in 'amdgpu_gfx_rlc_enter_safe_mode' drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c:62: warning: Function parameter or member 'xcc_id' not described in 'amdgpu_gfx_rlc_exit_safe_mode' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index d3bed9a3e61f..35e0ae9acadc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -31,6 +31,7 @@ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode * * @adev: amdgpu_device pointer + * @xcc_id: xcc accelerated compute core id * * Set RLC enter into safe mode if RLC is enabled and haven't in safe mode. */ @@ -55,6 +56,7 @@ void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id) * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode * * @adev: amdgpu_device pointer + * @xcc_id: xcc accelerated compute core id * * Set RLC exit safe mode if RLC is enabled and have entered into safe mode. */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 20fcd86a3e79..8c174c11eaee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1548,7 +1548,7 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * @sh_num: sh block to address * @instance: Certain registers are instanced per SE or SH. * 0xffffffff means broadcast to all SEs or SHs (CIK). 
- * + * @xcc_id: xcc accelerated compute core id * Select which SE, SH combinations to address. */ static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, From 3eeb0d037a543588cf2b8890224ec26841e1069a Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Tue, 30 May 2023 18:17:17 +0530 Subject: [PATCH 717/861] drm/amdgpu: Fix up missing kdoc parameter 'inst' in get_wave_count() & kgd_gfx_v9_get_cu_occupancy() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these warnings by adding 'inst' arguments to kdocs. gcc with W=1 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:692: warning: Function parameter or member 'inst' not described in 'get_wave_count' drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:763: warning: Function parameter or member 'inst' not described in 'kgd_gfx_v9_get_cu_occupancy' Cc: Felix Kuehling Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 9fa9aab22fe9..34bf030f3137 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -685,7 +685,8 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) * @queue_idx: Index of queue in the queue-map bit-field * @wave_cnt: Output parameter updated with number of waves in flight * @vmid: Output parameter updated with VMID of queue whose wave count - * is being collected + * is being collected + * @inst: xcc's instance number on a multi-XCC setup */ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, int *wave_cnt, int *vmid, uint32_t inst) @@ -721,9 +722,10 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * @adev: Handle of device from which to get number of waves in flight * @pasid: Identifies the process for which this query call is invoked * @pasid_wave_cnt: Output parameter updated with number of waves in flight that - * belong to process with given pasid + * belong to process with given pasid * @max_waves_per_cu: Output parameter updated with maximum number of waves - * possible per Compute Unit + * possible per Compute Unit + * @inst: xcc's instance number on a multi-XCC setup * * Note: It's possible that the device has too many queues (oversubscription) * in which case a VMID could be remapped to a different PASID. This could lead From 1bae03aab2b41770b9198b3ef1ddc7dc7efb0678 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Tue, 30 May 2023 14:43:14 +0530 Subject: [PATCH 718/861] drm/amdgpu: Fix up missing parameter in kdoc for 'inst' in gmc_ v7, v8, v9, v10, v11.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these warnings by adding 'inst' arguments to kdocs. 
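For reference, the kdoc shape these fixes converge on looks like the following (the function and wording are a hypothetical composite; the @inst line is the newly required piece):

    /**
     * example_flush_gpu_tlb_pasid - flush TLB entries for a pasid
     * @adev: amdgpu device pointer
     * @pasid: pasid to be flushed
     * @inst: KIQ instance used to submit the invalidation
     *
     * Flush the TLB for the requested pasid.
     */
    static void example_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                            uint16_t pasid, uint32_t inst);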
gcc with W=1 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c:428: warning: Function parameter or member 'inst' not described in 'gmc_v7_0_flush_gpu_tlb_pasid' drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c:626: warning: Function parameter or member 'inst' not described in 'gmc_v8_0_flush_gpu_tlb_pasid' drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c:423: warning: Function parameter or member 'inst' not described in 'gmc_v10_0_flush_gpu_tlb_pasid' drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c:328: warning: Function parameter or member 'inst' not described in 'gmc_v11_0_flush_gpu_tlb_pasid' drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c:950: warning: Function parameter or member 'inst' not described in 'gmc_v9_0_flush_gpu_tlb_pasid' Cc: Christian König Cc: Alex Deucher Cc: Hawking Zhang Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 + 5 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 01bd45651382..b2e42f1b0f12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -414,6 +414,7 @@ error_alloc: * @pasid: pasid to be flush * @flush_type: the flush type * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB() + * @inst: is used to select which instance of KIQ to use for the invalidation * * Flush the TLB for the requested pasid. */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 4bf807d825c0..c571f0d95994 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -319,6 +319,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * @pasid: pasid to be flush * @flush_type: the flush type * @all_hub: flush all hubs + * @inst: is used to select which instance of KIQ to use for the invalidation * * Flush the TLB for the requested pasid. */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 6f53049619cd..acd2b407860f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -419,6 +419,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) * @pasid: pasid to be flush * @flush_type: type of flush * @all_hub: flush all hubs + * @inst: is used to select which instance of KIQ to use for the invalidation * * Flush the TLB for the requested pasid. */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 48475077ca92..85dead2a5702 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -617,6 +617,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) * @pasid: pasid to be flush * @flush_type: type of flush * @all_hub: flush all hubs + * @inst: is used to select which instance of KIQ to use for the invalidation * * Flush the TLB for the requested pasid. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index be7823d82150..3ed286b72cae 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -941,6 +941,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * @pasid: pasid to be flush * @flush_type: the flush type * @all_hub: flush all hubs + * @inst: is used to select which instance of KIQ to use for the invalidation * * Flush the TLB for the requested pasid. */ From 932fc49479303961c1da54a1112eb26cdc890c76 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Mon, 29 May 2023 20:23:33 +0530 Subject: [PATCH 719/861] drm/amdgpu: Fix missing parameter desc for 'xcp_id' in amdgpu_amdkfd_reserve_mem_limit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these warnings by adding 'xcp_id' argument. gcc with W=1 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:160: warning: Function parameter or member 'xcp_id' not described in 'amdgpu_amdkfd_reserve_mem_limit' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Acked-by: Alex Deucher Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 23390b5932fb..f61527b800e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -153,8 +153,11 @@ void amdgpu_amdkfd_reserve_system_mem(uint64_t size) * @size: Size of buffer, in bytes, encapsulated by B0. This should be * equivalent to amdgpu_bo_size(BO) * @alloc_flag: Flag used in allocating a BO as noted above + * @xcp_id: xcp_id is used to get xcp from xcp manager, one xcp is + * managed as one compute node in driver for app * - * Return: returns -ENOMEM in case of error, ZERO otherwise + * Return: + * returns -ENOMEM in case of error, ZERO otherwise */ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag, int8_t xcp_id) From 7a66ad6c087ee3863cc9a8d696ac2191d1c2e904 Mon Sep 17 00:00:00 2001 From: ZhenGuo Yin Date: Tue, 9 May 2023 17:42:11 +0800 Subject: [PATCH 720/861] drm/amdgpu: set finished fence error if job timedout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set finished fence to ETIME error if job timedout. 
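A sketch of how a waiter can observe the new error (standard dma_fence API; the surrounding wait logic is simplified):

    #include <linux/dma-fence.h>

    /* Once the scheduler signals the finished fence with an error,
     * dma_fence_get_status() returns it; a timed-out job reads -ETIME. */
    long r = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));

    if (r > 0 && dma_fence_get_status(fence) == -ETIME)
            pr_warn("job was killed by the GPU scheduler timeout\n");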
Signed-off-by: ZhenGuo Yin Acked-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index c3d9d75143f4..aca3a2bfe8d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -65,6 +65,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n", ti.process_name, ti.tgid, ti.task_name, ti.pid); + dma_fence_set_error(&s_job->s_fence->finished, -ETIME); + if (amdgpu_device_should_recover_gpu(ring->adev)) { struct amdgpu_reset_context reset_context; memset(&reset_context, 0, sizeof(reset_context)); From b3a02e8b61c19a0380870c713bc704d7e4f9e0dd Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Tue, 30 May 2023 04:03:16 +0530 Subject: [PATCH 721/861] drm/amdgpu: Fix up missing parameters kdoc in svm_migrate_vma_to_ram MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these warnings by adding & deleting the deviant arguments. gcc with W=1 drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_migrate.c:671: warning: Function parameter or member 'node' not described in 'svm_migrate_vma_to_ram' drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_migrate.c:671: warning: Function parameter or member 'trigger' not described in 'svm_migrate_vma_to_ram' drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_migrate.c:671: warning: Function parameter or member 'fault_page' not described in 'svm_migrate_vma_to_ram' drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_migrate.c:671: warning: Excess function parameter 'adev' description in 'svm_migrate_vma_to_ram' drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_migrate.c:771: warning: Function parameter or member 'fault_page' not described in 'svm_migrate_vram_to_ram' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 35cf6558cf1b..58d95fb99595 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -651,11 +651,13 @@ out_oom: /** * svm_migrate_vma_to_ram - migrate range inside one vma from device to system * - * @adev: amdgpu device to migrate from * @prange: svm range structure * @vma: vm_area_struct that range [start, end] belongs to * @start: range start virtual address in pages * @end: range end virtual address in pages + * @node: kfd node device to migrate from + * @trigger: reason of migration + * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback * * Context: Process context, caller hold mmap read lock, prange->migrate_mutex * * @@ -760,6 +762,7 @@ out: * @prange: range structure * @mm: process mm, use current->mm if NULL * @trigger: reason of migration + * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback * * Context: Process context, caller hold mmap read lock, prange->migrate_mutex * From 23ed8833f231a3ea36d1c352737ef6f1eadfc899 Mon Sep 17 00:00:00 2001 From: Bob Zhou Date: Tue, 30 May 2023 14:48:02 +0800 Subject: [PATCH 722/861] drm/amdxcp: fix Makefile to build amdxcp module After the drm Makefile builds amdgpu, amdgpu.ko has already 
been created and "amdgpu-y +=" in amdxcp Makefile isn't used. So modify amdgpu-y to amdxcp-y and build amdxcp module. Signed-off-by: Bob Zhou Reviewed-by: James Zhu Reviewed-by: Guchun Chen Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdxcp/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdxcp/Makefile b/drivers/gpu/drm/amd/amdxcp/Makefile index 5e1bd70748d4..870501a4bb8c 100644 --- a/drivers/gpu/drm/amd/amdxcp/Makefile +++ b/drivers/gpu/drm/amd/amdxcp/Makefile @@ -20,6 +20,6 @@ # OTHER DEALINGS IN THE SOFTWARE. # -amdgpu-y := amdgpu_xcp_drv.o +amdxcp-y := amdgpu_xcp_drv.o -obj-$(CONFIG_DRM_AMDGPU) += amdgpu_xcp_drv.o +obj-$(CONFIG_DRM_AMDGPU) += amdxcp.o From c00ebe9aeec6df816fa8a5a167cd1c102d02dd28 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 31 May 2023 14:52:02 +0530 Subject: [PATCH 723/861] drm/amd/display: Fix up kdoc formatting in dcn32_resource_helpers.c Fixes the following W=1 kernel build warning: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource_helpers.c:97: warning: Cannot understand * ************************************************************************** drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource_helpers.c:264: warning: Cannot understand * ************************************************************************* drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource_helpers.c:435: warning: Cannot understand * ************************************************************************* drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource_helpers.c:475: warning: Cannot understand * ************************************************************************* drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource_helpers.c:599: warning: Function parameter or member 'dc' not described in 'dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource_helpers.c:599: warning: Function parameter or member 'context' not described in 'dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource_helpers.c:587: warning: Function parameter or member 'dc' not described in 'dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_resource_helpers.c:587: warning: Function parameter or member 'context' not described in 'dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch' Cc: Hamza Mahfooz Cc: Rodrigo Siqueira Cc: Harry Wentland Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- .../display/dc/dcn32/dcn32_resource_helpers.c | 57 ++++++++----------- 1 file changed, 23 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index a8082580df92..1d13fd797212 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -94,18 +94,15 @@ uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( } /** - * ******************************************************************************************** - * dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP + * dcn32_helper_calculate_num_ways_for_subvp(): Calculate number of ways needed for SubVP * * Gets total allocation required 
for the phantom viewport calculated by DML in bytes and * converts to number of cache ways. * - * @param [in] dc: current dc state - * @param [in] context: new dc state + * @dc: current dc state + * @context: new dc state * - * @return: number of ways required for SubVP - * - * ******************************************************************************************** + * Return: number of ways required for SubVP */ uint32_t dcn32_helper_calculate_num_ways_for_subvp( struct dc *dc, @@ -261,8 +258,7 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe) #define DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER 7 /** - * ******************************************************************************************* - * dcn32_determine_det_override: Determine DET allocation for each pipe + * dcn32_determine_det_override(): Determine DET allocation for each pipe * * This function determines how much DET to allocate for each pipe. The total number of * DET segments will be split equally among each of the streams, and after that the DET @@ -290,13 +286,11 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe) * 3. Assign smaller DET size for lower pixel display and higher DET size for * higher pixel display * - * @param [in]: dc: Current DC state - * @param [in]: context: New DC state to be programmed - * @param [in]: pipes: Array of DML pipes + * @dc: Current DC state + * @context: New DC state to be programmed + * @pipes: Array of DML pipes * - * @return: void - * - * ******************************************************************************************* + * Return: void */ void dcn32_determine_det_override(struct dc *dc, struct dc_state *context, @@ -432,8 +426,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, } /** - * ******************************************************************************************* - * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases + * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases * * This function saves the MALL (SubVP) case for fast validation cases. For fast validation, * there are situations where a shallow copy of the dc->current_state is created for the @@ -446,13 +439,11 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, * NOTE: This function ONLY works if the streams are not moved to a different pipe in the * validation. We don't expect this to happen in fast_validation=1 cases. 
* - * @param [in]: dc: Current DC state - * @param [in]: context: New DC state to be programmed - * @param [out]: temp_config: struct used to cache the existing MALL state + * @dc: Current DC state + * @context: New DC state to be programmed + * @temp_config: struct used to cache the existing MALL state * - * @return: void - * - * ******************************************************************************************* + * Return: void */ void dcn32_save_mall_state(struct dc *dc, struct dc_state *context, @@ -472,18 +463,15 @@ void dcn32_save_mall_state(struct dc *dc, } /** - * ******************************************************************************************* - * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases + * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases * * Restore the MALL state based on the previously saved state from dcn32_save_mall_state * - * @param [in]: dc: Current DC state - * @param [in/out]: context: New DC state to be programmed, restore MALL state into here - * @param [in]: temp_config: struct that has the cached MALL state + * @dc: Current DC state + * @context: New DC state to be programmed, restore MALL state into here + * @temp_config: struct that has the cached MALL state * - * @return: void - * - * ******************************************************************************************* + * Return: void */ void dcn32_restore_mall_state(struct dc *dc, struct dc_state *context, @@ -588,10 +576,11 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream) } /** - * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch - Determines if config can support FPO + * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch() - Determines if config can + * support FPO * - * @param [in]: dc - current dc state - * @param [in]: context - new dc state + * @dc: current dc state + * @context: new dc state * * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL */ From 3808c34b291925d8a0fda2f23136381c1feb1dd2 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 31 May 2023 10:41:59 +0530 Subject: [PATCH 724/861] drm/amd/display: Fix up missing 'dc' & 'pipe_ctx' kdoc parameters in delay_cursor_until_vupdate() Fixes the following gcc with W=1: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_hw_sequencer.c:1904: warning: Function parameter or member 'dc' not described in 'delay_cursor_until_vupdate' drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_hw_sequencer.c:1904: warning: Function parameter or member 'pipe_ctx' not described in 'delay_cursor_until_vupdate' Cc: Rodrigo Siqueira Cc: Harry Wentland Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 13b4e5118459..20a1582be0b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1899,6 +1899,11 @@ void dcn10_pipe_control_lock( * * TODO: Optimize cursor programming to be once per frame before VUPDATE * to avoid the need for this workaround. 
+ * + * @dc: Current DC state + * @pipe_ctx: Pipe_ctx pointer for delayed cursor update + * + * Return: void */ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) { From b5c07eaefc8b6e4aaa433f52ce74e619cd0ec386 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 31 May 2023 09:22:36 +0530 Subject: [PATCH 725/861] drm/amd/display: Correct kdoc formatting for DCN32_CRB_SEGMENT_SIZE_KB in dcn32_hubbub.c Fixes the following W=1 kernel build warning: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn32/dcn32_hubbub.c:45: warning: Cannot understand * @DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for on line 45 - I thought it was a doc line Cc: Hamza Mahfooz Cc: Rodrigo Siqueira Cc: Harry Wentland Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index eb08ccc38e79..a18b9c0c5709 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -42,8 +42,8 @@ hubbub2->shifts->field_name, hubbub2->masks->field_name /** - * @DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for - * DCN32 + * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for + * DCN32 */ #define DCN32_CRB_SEGMENT_SIZE_KB 64 From 09521b5d49222d5ae932c4d738b2d55fb7abb415 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 30 May 2023 11:57:59 -0500 Subject: [PATCH 726/861] drm/amd: Disallow s0ix without BIOS support again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit cf488dcd0ab7 ("drm/amd: Allow s0ix without BIOS support") showed improvements to power consumption over suspend when s0ix wasn't enabled in BIOS and the system didn't support S3. This patch however was misguided because the reason the system didn't support S3 was because SMT was disabled in OEM BIOS setup. This prevented the BIOS from allowing S3. Also allowing GPUs to use the s2idle path actually causes problems if they're invoked on systems that may not support s2idle in the platform firmware. `systemd` has a tendency to try to use `s2idle` if `deep` fails for any reason, which could lead to unexpected flows. The original commit also fixed a problem during resume from suspend to idle without hardware support, but this is no longer necessary with commit ca4751866397 ("drm/amd: Don't allow s0ix on APUs older than Raven") Revert commit cf488dcd0ab7 ("drm/amd: Allow s0ix without BIOS support") to make it match the expected behavior again. 
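Condensed, the policy after this revert is equivalent to the following sketch (simplified from the diff that follows; the helper name is hypothetical and the warning messages are dropped):

    /* s0ix is reported usable only when the platform firmware
     * advertises low-power S0 *and* the kernel can drive the AMD PMC. */
    static bool example_s0ix_usable(void)
    {
            if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
                    return false; /* BIOS not configured for s2idle */

            return IS_ENABLED(CONFIG_AMD_PMC);
    }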
Cc: Rafael Ávila de Espíndola Link: https://github.com/torvalds/linux/blob/v6.1/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c#L1060 Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/2599 Reviewed-by: Alex Deucher Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index be881f21b340..73e4434d9b54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1499,16 +1499,20 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) * S0ix even though the system is suspending to idle, so return false * in that case. */ - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { dev_warn_once(adev->dev, "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); + return false; + } #if !IS_ENABLED(CONFIG_AMD_PMC) dev_warn_once(adev->dev, "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); -#endif /* CONFIG_AMD_PMC */ + return false; +#else return true; +#endif /* CONFIG_AMD_PMC */ } #endif /* CONFIG_SUSPEND */ From 0df1106bfd13a9fe1eb7c33666bec091bc37c2a7 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Thu, 30 Mar 2023 11:20:40 -0400 Subject: [PATCH 727/861] drm/amdkfd: remove unused sq_int_priv variable clang with W=1 reports drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_int_process_v11.c:282:38: error: variable 'sq_int_priv' set but not used [-Werror,-Wunused-but-set-variable] uint8_t sq_int_enc, sq_int_errtype, sq_int_priv; ^ This variable is not used so remove it. Signed-off-by: Tom Rix Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index 0f0fdea4cd8a..c2166bf964ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -279,7 +279,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, { uint16_t source_id, client_id, ring_id, pasid, vmid; uint32_t context_id0, context_id1; - uint8_t sq_int_enc, sq_int_errtype, sq_int_priv; + uint8_t sq_int_enc, sq_int_errtype; struct kfd_vm_fault_info info = {0}; struct kfd_hsa_memory_exception_data exception_data; @@ -348,13 +348,6 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, break; case SQ_INTERRUPT_WORD_ENCODING_INST: print_sq_intr_info_inst(context_id0, context_id1); - sq_int_priv = REG_GET_FIELD(context_id0, - SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV); - /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid, - KFD_CTXID0_DOORBELL_ID(context_id0), - KFD_CTXID0_TRAP_CODE(context_id0), - NULL, 0))) - return;*/ break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: print_sq_intr_info_error(context_id0, context_id1); From 1b320ad3f5a88602aef2f207bc211539a5496702 Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Wed, 24 May 2023 14:59:32 -0400 Subject: [PATCH 728/861] drm/amd/amdgpu: introduce DRM_AMDGPU_WERROR We want to do -Werror builds on our CI. However, non-amdgpu breakages have prevented us from doing so thus far. 
Also, there are a number of additional checks that we should enable, that the community cares about and are hidden behind -Wextra. So, define DRM_AMDGPU_WERROR to only enable -Werror for the amdgpu kernel module and enable -Wextra while disabling all of the checks that are too noisy. Cc: Alex Deucher Cc: Kenny Ho Suggested-by: Jani Nikula Reviewed-by: Kenny Ho Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Kconfig | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/Makefile | 9 +++++++++ 2 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 12adca8c7819..b91e79c721e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -69,6 +69,16 @@ config DRM_AMDGPU_USERPTR This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it isn't already selected to enabled full userptr support. +config DRM_AMDGPU_WERROR + bool "Force the compiler to throw an error instead of a warning when compiling" + depends on DRM_AMDGPU + depends on EXPERT + depends on !COMPILE_TEST + default n + help + Add -Werror to the build flags for amdgpu.ko. + Only enable this if you are warning code for amdgpu.ko. + source "drivers/gpu/drm/amd/acp/Kconfig" source "drivers/gpu/drm/amd/display/Kconfig" source "drivers/gpu/drm/amd/amdkfd/Kconfig" diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 74a9aa6fe18c..7ee68b1bbfed 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -39,6 +39,15 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \ -I$(FULL_AMD_PATH)/amdkfd +subdir-ccflags-y := -Wextra +subdir-ccflags-y += -Wunused-but-set-variable +subdir-ccflags-y += -Wno-unused-parameter +subdir-ccflags-y += -Wno-type-limits +subdir-ccflags-y += -Wno-sign-compare +subdir-ccflags-y += -Wno-missing-field-initializers +subdir-ccflags-y += -Wno-override-init +subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror + amdgpu-y := amdgpu_drv.o # add KMS driver From c60c9a5f9ab659e5ca9fa0e485a28e82fba761ce Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 31 May 2023 00:34:10 +0530 Subject: [PATCH 729/861] drm/amd/display: Fix up kdoc formatting in display_mode_vba.c Fixes the following W=1 kernel build warning: drivers/gpu/drm/amd/amdgpu/../display/dc/dml/display_mode_vba.c:936: warning: Cannot understand * ************************************************************************* Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 2f63ae954826..9a3ded311195 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -933,18 +933,16 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) } /** - * ******************************************************************************************** * cache_debug_params: Cache any params that needed to be maintained from the initial validation * for debug purposes. 
* * The DML getters can modify some of the VBA params that we are interested in (for example when * calculating with dummy p-state latency), so cache any params here that we want for debugging * - * @param [in] mode_lib: mode_lib input/output of validate call + * @mode_lib: mode_lib input/output of validate call * - * @return: void + * Return: void * - * ******************************************************************************************** */ static void cache_debug_params(struct display_mode_lib *mode_lib) { From ebe884e8b93351382290ae107c880230d3a1f125 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 31 May 2023 00:12:22 +0530 Subject: [PATCH 730/861] drm/amdgpu: Fix up kdoc 'ring' parameter in sdma_v6_0_ring_pad_ib MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix this warning by adding 'ring' arguments to kdoc. gcc with W=1 drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c:1128: warning: Function parameter or member 'ring' not described in 'sdma_v6_0_ring_pad_ib' Cc: Felix Kuehling Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 967849c59ebe..3b03dda854fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1121,6 +1121,7 @@ static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib, /** * sdma_v6_0_ring_pad_ib - pad the IB * @ib: indirect buffer to fill with padding + * @ring: amdgpu ring pointer * * Pad the IB with NOPs to a boundary multiple of 8. */ From 2e9fee9b8e0e10fb9a4cba3ace607cebf7021bc1 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Thu, 25 May 2023 22:56:17 +0530 Subject: [PATCH 731/861] drm/amdgpu: Fix up kdoc in amdgpu_device.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix these warnings by deleting the deviant arguments. 
gcc with W=1 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:799: warning: Excess function parameter 'pcie_index' description in 'amdgpu_device_indirect_wreg' drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:799: warning: Excess function parameter 'pcie_data' description in 'amdgpu_device_indirect_wreg' drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:870: warning: Excess function parameter 'pcie_index' description in 'amdgpu_device_indirect_wreg64' drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:870: warning: Excess function parameter 'pcie_data' description in 'amdgpu_device_indirect_wreg64' Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8f50ca2bee97..e25f085ee886 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -789,8 +789,6 @@ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, * amdgpu_device_indirect_wreg - write an indirect register address * * @adev: amdgpu_device pointer - * @pcie_index: mmio register offset - * @pcie_data: mmio register offset * @reg_addr: indirect register offset * @reg_data: indirect register data * @@ -860,8 +858,6 @@ void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address * * @adev: amdgpu_device pointer - * @pcie_index: mmio register offset - * @pcie_data: mmio register offset * @reg_addr: indirect register offset * @reg_data: indirect register data * From 16cc3a221537bb3588ec2a568d7bd0e7972b25a8 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 31 May 2023 23:33:35 +0530 Subject: [PATCH 732/861] drm/amdgpu: Add function parameter 'event' to kdoc in svm_range_evict() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the following gcc with W=1: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c:1841: warning: Function parameter or member 'event' not described in 'svm_range_evict' Cc: Felix Kuehling Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9c88d6e90c3b..615eab3f78c9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1826,6 +1826,7 @@ out_reschedule: * @mm: current process mm_struct * @start: starting process queue number * @last: last process queue number + * @event: mmu notifier event when range is evicted or migrated * * Stop all queues of the process to ensure GPU doesn't access the memory, then * return to let CPU evict the buffer and proceed CPU pagetable update. From cbb63eccc05626d0d111b335e44f111a3bb92871 Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 29 May 2023 14:23:37 -0400 Subject: [PATCH 733/861] drm/amdgpu: fix Null pointer dereference error in amdgpu_device_recover_vram Use the function of amdgpu_bo_vm_destroy to handle the resource release of shadow bo. During the amdgpu_mes_self_test, shadow bo released, but vmbo->shadow_list was not, which caused a null pointer reference error in amdgpu_device_recover_vram when GPU reset. 
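The resulting ordering, condensed from the hunks below, is that the shadow BO's parent back-reference and the amdgpu_bo_vm_destroy callback are only installed once the shadow list entry exists, all under the shadow list lock (the adev lookup line is assumed from the surrounding function; it is not part of this diff):

	void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
	{
		struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);

		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
		/* Only now may TTM call amdgpu_bo_vm_destroy(): the list
		 * entry and parent reference it relies on both exist.
		 */
		vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
		vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
		mutex_unlock(&adev->shadow_list_lock);
	}

This closes the window in which the shadow could be torn down without its shadow_list entry being removed, which is what amdgpu_device_recover_vram dereferenced during GPU reset.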
Fixes: 6c032c37ac3e ("drm/amdgpu: Fix vram recover doesn't work after whole GPU reset (v2)") Signed-off-by: xinhui pan Signed-off-by: Horatio Zhang Acked-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10 ++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 1 - 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index d9e331508389..f76649e523a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); + struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; struct amdgpu_bo_vm *vmbo; + bo = shadow_bo->parent; vmbo = to_amdgpu_bo_vm(bo); /* in case amdgpu_device_recover_vram got NULL of bo->parent */ if (!list_empty(&vmbo->shadow_list)) { @@ -711,11 +712,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, return r; *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); - INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); - /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list - * is initialized. - */ - bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy; return r; } @@ -732,6 +728,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) mutex_lock(&adev->shadow_list_lock); list_add_tail(&vmbo->shadow_list, &adev->shadow_list); + vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); + vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; mutex_unlock(&adev->shadow_list_lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index cc3b1b596e56..dea1a64be44d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -573,7 +573,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); amdgpu_bo_add_to_shadow_list(*vmbo); return 0; From 023f4d60747cadd96115c3c3b55986798322f3f6 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 3 Apr 2023 11:38:17 +0530 Subject: [PATCH 734/861] drm/amd/pm: Update SMUv13.0.6 PMFW headers Update PMFW interface headers for the new metrics table format and throttling information.
v2: Added dummy definition for compilation error Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Asad Kamal Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- .../inc/pmfw_if/smu13_driver_if_v13_0_6.h | 31 ++++++++++++++----- .../pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 13 +++++--- .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 2 ++ 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h index de84fff39799..ca4a5e99ccd1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h @@ -26,7 +26,7 @@ // *** IMPORTANT *** // PMFW TEAM: Always increment the interface version if // anything is changed in this file -#define SMU13_0_6_DRIVER_IF_VERSION 0x08042023 +#define SMU13_0_6_DRIVER_IF_VERSION 0x08042024 //I2C Interface #define NUM_I2C_CONTROLLERS 8 @@ -125,11 +125,28 @@ typedef struct { #define IH_INTERRUPT_ID_TO_DRIVER 0xFE #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 -//thermal over-temp mask defines -#define THROTTLER_TEMP_CCD_BIT 5 -#define THROTTLER_TEMP_XCD_BIT 6 -#define THROTTLER_TEMP_HBM_BIT 7 -#define THROTTLER_TEMP_AID_BIT 8 -#define THROTTLER_VRHOT_BIT 9 +//thermal over-temp mask defines for IH interrupt to host +#define THROTTLER_PROCHOT_BIT 0 +#define THROTTLER_PPT_BIT 1 +#define THROTTLER_THERMAL_SOCKET_BIT 2//AID, XCD, CCD throttling +#define THROTTLER_THERMAL_VR_BIT 3//VRHOT +#define THROTTLER_THERMAL_HBM_BIT 4 + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram +// #define TABLE_PPTABLE 0 +// #define TABLE_AVFS_PSM_DEBUG 1 +// #define TABLE_AVFS_FUSE_OVERRIDE 2 +// #define TABLE_PMSTATUSLOG 3 +// #define TABLE_SMU_METRICS 4 +// #define TABLE_DRIVER_SMU_CONFIG 5 +// #define TABLE_I2C_COMMANDS 6 +// #define TABLE_COUNT 7 + +// // Table transfer status +// #define TABLE_TRANSFER_OK 0x0 +// #define TABLE_TRANSFER_FAILED 0xFF +// #define TABLE_TRANSFER_PENDING 0xAB #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 3fe403615d86..252aef190c5c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,9 +123,9 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0x3 +#define SMU_METRICS_TABLE_VERSION 0x5 -typedef struct { +typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; //TEMPERATURE @@ -202,11 +202,16 @@ typedef struct { // New Items at end to maintain driver compatibility uint32_t GfxclkFrequency[8]; + + //PSNs + uint64_t PublicSerialNumber_AID[4]; + uint64_t PublicSerialNumber_XCD[8]; + uint64_t PublicSerialNumber_CCD[12]; } MetricsTable_t; -#define SMU_VF_METRICS_TABLE_VERSION 0x1 +#define SMU_VF_METRICS_TABLE_VERSION 0x3 -typedef struct { +typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; uint32_t InstGfxclk_TargFreq; uint64_t AccGfxclk_TargFreq; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 41b49cc827cd..27fd71afc73f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -82,6 +82,8 @@ #define 
smnPCIE_ESM_CTRL 0x111003D0 +#define THROTTLER_TEMP_HBM_BIT 2 + static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), From 93682f8a196718c2caf9b9b3de7894d5c0318f1f Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 31 Mar 2023 16:34:15 +0530 Subject: [PATCH 735/861] drm/amd/pm: Fix SMUv13.0.6 throttle status report Add throttle status in power context Keep throttle status indicator in SMUv13 power context v2: Removed Dummy definition Signed-off-by: Lijo Lazar Signed-off-by: Asad Kamal Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 1 + .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 95 +++++++++---------- 2 files changed, 46 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 3ae8d5d252a3..5a99a091965e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -119,6 +119,7 @@ struct smu_13_0_power_context { uint32_t power_source; uint8_t in_power_limit_boost_mode; enum smu_13_0_power_state power_state; + atomic_t throttle_status; }; #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 27fd71afc73f..b9f32e0364db 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -82,8 +82,6 @@ #define smnPCIE_ESM_CTRL 0x111003D0 -#define THROTTLER_TEMP_HBM_BIT 2 - static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -174,17 +172,12 @@ static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = { TAB_MAP(I2C_COMMANDS), }; -#define THROTTLER_PROCHOT_GFX_BIT 0 -#define THROTTLER_PPT_BIT 1 -#define THROTTLER_TEMP_SOC_BIT 2 -#define THROTTLER_TEMP_VR_GFX_BIT 3 - static const uint8_t smu_v13_0_6_throttler_map[] = { [THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT), - [THROTTLER_TEMP_SOC_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT), - [THROTTLER_TEMP_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), - [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), - [THROTTLER_PROCHOT_GFX_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT), + [THROTTLER_THERMAL_SOCKET_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT), + [THROTTLER_THERMAL_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), + [THROTTLER_THERMAL_VR_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), + [THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT), }; struct PPTable_t { @@ -642,16 +635,14 @@ static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1, return (abs(frequency1 - frequency2) <= EPSILON); } -static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu, - MetricsTable_t *metrics) +static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu) { + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_13_0_power_context *power_context = smu_power->power_context; uint32_t throttler_status = 0; - throttler_status |= metrics->ProchotResidencyAcc > 0 ? 1U << THROTTLER_PROCHOT_GFX_BIT : 0; - throttler_status |= metrics->PptResidencyAcc > 0 ? 1U << THROTTLER_PPT_BIT : 0; - throttler_status |= metrics->SocketThmResidencyAcc > 0 ? 
1U << THROTTLER_TEMP_SOC_BIT : 0; - throttler_status |= metrics->VrThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_VR_GFX_BIT : 0; - throttler_status |= metrics->HbmThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_HBM_BIT : 0; + throttler_status = atomic_read(&power_context->throttle_status); + dev_dbg(smu->adev->dev, "SMU Throttler status: %u", throttler_status); return throttler_status; } @@ -721,9 +712,6 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, case METRICS_TEMPERATURE_VRSOC: *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature); break; - case METRICS_THROTTLER_STATUS: - *value = smu_v13_0_6_get_throttler_status(smu, metrics); - break; default: *value = UINT_MAX; break; @@ -1290,13 +1278,11 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_13_0_power_context *power_context = smu_power->power_context; uint32_t client_id = entry->client_id; - uint32_t src_id = entry->src_id; - /* - * ctxid is used to distinguish different - * events for SMCToHost interrupt - */ uint32_t ctxid = entry->src_data[0]; + uint32_t src_id = entry->src_id; uint32_t data; if (client_id == SOC15_IH_CLIENTID_MP1) { @@ -1305,7 +1291,10 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data); - + /* + * ctxid is used to distinguish different events for SMCToHost + * interrupt. + */ switch (ctxid) { case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING: /* @@ -1316,8 +1305,17 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, if (!atomic_read(&adev->throttling_logging_enabled)) return 0; - if (__ratelimit(&adev->throttling_logging_rs)) + /* This uses the new method which fixes the + * incorrect throttling status reporting + * through metrics table. For older FWs, + * it will be ignored. + */ + if (__ratelimit(&adev->throttling_logging_rs)) { + atomic_set( + &power_context->throttle_status, + entry->src_data[1]); schedule_work(&smu->throttling_logging_work); + } break; } @@ -1895,37 +1893,35 @@ static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en) en ? 
0 : 1, NULL); } -static const struct throttling_logging_label { - uint32_t feature_mask; - const char *label; -} logging_label[] = { - { (1U << THROTTLER_TEMP_HBM_BIT), "HBM" }, - { (1U << THROTTLER_TEMP_SOC_BIT), "SOC" }, - { (1U << THROTTLER_TEMP_VR_GFX_BIT), "VR limit" }, +static const char *const throttling_logging_label[] = { + [THROTTLER_PROCHOT_BIT] = "Prochot", + [THROTTLER_PPT_BIT] = "PPT", + [THROTTLER_THERMAL_SOCKET_BIT] = "SOC", + [THROTTLER_THERMAL_VR_BIT] = "VR", + [THROTTLER_THERMAL_HBM_BIT] = "HBM" }; + static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu) { - int ret; int throttler_idx, throtting_events = 0, buf_idx = 0; struct amdgpu_device *adev = smu->adev; uint32_t throttler_status; char log_buf[256]; - ret = smu_v13_0_6_get_smu_metrics_data(smu, METRICS_THROTTLER_STATUS, - &throttler_status); - if (ret) + throttler_status = smu_v13_0_6_get_throttler_status(smu); + if (!throttler_status) return; memset(log_buf, 0, sizeof(log_buf)); - for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label); + for (throttler_idx = 0; + throttler_idx < ARRAY_SIZE(throttling_logging_label); throttler_idx++) { - if (throttler_status & - logging_label[throttler_idx].feature_mask) { + if (throttler_status & (1U << throttler_idx)) { throtting_events++; - buf_idx += snprintf(log_buf + buf_idx, - sizeof(log_buf) - buf_idx, "%s%s", - throtting_events > 1 ? " and " : "", - logging_label[throttler_idx].label); + buf_idx += snprintf( + log_buf + buf_idx, sizeof(log_buf) - buf_idx, + "%s%s", throtting_events > 1 ? " and " : "", + throttling_logging_label[throttler_idx]); if (buf_idx >= sizeof(log_buf)) { dev_err(adev->dev, "buffer overflow!\n"); log_buf[sizeof(log_buf) - 1] = '\0'; @@ -1934,10 +1930,9 @@ static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu) } } - dev_warn( - adev->dev, - "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n", - log_buf); + dev_warn(adev->dev, + "WARN: GPU is throttled, expect performance decrease. %s.\n", + log_buf); kgd2kfd_smi_event_throttle( smu->adev->kfd.dev, smu_cmn_get_indep_throttler_status(throttler_status, From 5e86aa29a338f5c25e2d10d021bffc6b1b560ad5 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 24 May 2023 13:54:26 +0800 Subject: [PATCH 736/861] drm/amd/pm: add unique serial number support for smu_v13_0_6 add unique serial number support for smu_v13_0_6. 
(use aid0 serial number by default) Signed-off-by: Yang Wang Signed-off-by: Asad Kamal Reviewed-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 20 ++++++++----------- 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index b9f32e0364db..75255e0baf91 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -192,6 +192,7 @@ struct PPTable_t { uint32_t LclkFrequencyTable[4]; uint32_t MaxLclkDpmRange; uint32_t MinLclkDpmRange; + uint64_t PublicSerialNumber_AID; bool Init; }; @@ -352,6 +353,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]); } + /* use AID0 serial number by default */ + pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0]; + pptable->Init = true; } @@ -1856,19 +1860,11 @@ static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu) static void smu_v13_0_6_get_unique_id(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - //SmuMetrics_t *metrics = smu->smu_table.metrics_table; - uint32_t upper32 = 0, lower32 = 0; - int ret; + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; - ret = smu_cmn_get_metrics_table(smu, NULL, false); - if (ret) - goto out; - - //upper32 = metrics->PublicSerialNumUpper32; - //lower32 = metrics->PublicSerialNumLower32; - -out: - adev->unique_id = ((uint64_t)upper32 << 32) | lower32; + adev->unique_id = pptable->PublicSerialNumber_AID; if (adev->serial[0] == '\0') sprintf(adev->serial, "%016llx", adev->unique_id); } From 3c87de6d034fcb756a10523367219c5564a85fd3 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 31 Mar 2023 16:30:01 +0530 Subject: [PATCH 737/861] drm/amd/pm: Fix power context allocation in SMU13 Use the right data structure for allocation. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index b24c8549d0b1..70db36d45974 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -538,11 +538,11 @@ int smu_v13_0_init_power(struct smu_context *smu) if (smu_power->power_context || smu_power->power_context_size != 0) return -EINVAL; - smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context), + smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context), GFP_KERNEL); if (!smu_power->power_context) return -ENOMEM; - smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context); + smu_power->power_context_size = sizeof(struct smu_13_0_power_context); return 0; } From 6ff5a1cff70441e1cd27614c359a66d29649e872 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 6 Apr 2023 12:08:21 +0800 Subject: [PATCH 738/861] drm/amd/pm: conditionally disable pcie lane switching for some sienna_cichlid SKUs Disable the pcie lane switching for some sienna_cichlid SKUs since it might not work well on some platforms. 
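The mechanism, condensed from the diff below: a per-SKU table supplies an optional lane width (and gen speed) override, and the DPM range is then either pinned to that single value or derived from the pptable, capped by the platform limits in both cases:

	if (lane_width_override != 0xff) {
		/* matched SKU: pin min == max == override (capped) */
		min_lane_width = MIN(pcie_width_cap, lane_width_override);
		max_lane_width = MIN(pcie_width_cap, lane_width_override);
	} else {
		min_lane_width = MAX(1, table_member2[0]);
		max_lane_width = MIN(pcie_width_cap, table_member2[1]);
		if (min_lane_width > max_lane_width)
			min_lane_width = max_lane_width;
	}

Pinning min and max to the same level is what effectively disables lane switching on the affected SKUs.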
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 92 +++++++++++++++---- 1 file changed, 74 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 75f18681e984..85d53597eb07 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context return ret; } +static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu, + uint32_t *gen_speed_override, + uint32_t *lane_width_override) +{ + struct amdgpu_device *adev = smu->adev; + + *gen_speed_override = 0xff; + *lane_width_override = 0xff; + + switch (adev->pdev->device) { + case 0x73A0: + case 0x73A1: + case 0x73A2: + case 0x73A3: + case 0x73AB: + case 0x73AE: + /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */ + *lane_width_override = 6; + break; + case 0x73E0: + case 0x73E1: + case 0x73E3: + *lane_width_override = 4; + break; + case 0x7420: + case 0x7421: + case 0x7422: + case 0x7423: + case 0x7424: + *lane_width_override = 3; + break; + default: + break; + } +} + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap) { struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; - - uint32_t smu_pcie_arg; + struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; + uint32_t gen_speed_override, lane_width_override; uint8_t *table_member1, *table_member2; + uint32_t min_gen_speed, max_gen_speed; + uint32_t min_lane_width, max_lane_width; + uint32_t smu_pcie_arg; int ret, i; GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); - /* lclk dpm table setup */ - for (i = 0; i < MAX_PCIE_CONF; i++) { - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i]; - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i]; + sienna_cichlid_get_override_pcie_settings(smu, + &gen_speed_override, + &lane_width_override); + + /* PCIE gen speed override */ + if (gen_speed_override != 0xff) { + min_gen_speed = MIN(pcie_gen_cap, gen_speed_override); + max_gen_speed = MIN(pcie_gen_cap, gen_speed_override); + } else { + min_gen_speed = MAX(0, table_member1[0]); + max_gen_speed = MIN(pcie_gen_cap, table_member1[1]); + min_gen_speed = min_gen_speed > max_gen_speed ? + max_gen_speed : min_gen_speed; } + pcie_table->pcie_gen[0] = min_gen_speed; + pcie_table->pcie_gen[1] = max_gen_speed; + + /* PCIE lane width override */ + if (lane_width_override != 0xff) { + min_lane_width = MIN(pcie_width_cap, lane_width_override); + max_lane_width = MIN(pcie_width_cap, lane_width_override); + } else { + min_lane_width = MAX(1, table_member2[0]); + max_lane_width = MIN(pcie_width_cap, table_member2[1]); + min_lane_width = min_lane_width > max_lane_width ? + max_lane_width : min_lane_width; + } + pcie_table->pcie_lane[0] = min_lane_width; + pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16) | - ((table_member1[i] <= pcie_gen_cap) ? - (table_member1[i] << 8) : - (pcie_gen_cap << 8)) | - ((table_member2[i] <= pcie_width_cap) ? 
- table_member2[i] : - pcie_width_cap); + smu_pcie_arg = (i << 16 | + pcie_table->pcie_gen[i] << 8 | + pcie_table->pcie_lane[i]); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, @@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, NULL); if (ret) return ret; - - if (table_member1[i] > pcie_gen_cap) - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; - if (table_member2[i] > pcie_width_cap) - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; } return 0; From ba3c87fffb79311f54464288c66421d19c2c1234 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 2 Jun 2023 12:58:05 -0400 Subject: [PATCH 739/861] amd/amdkfd: drop unused KFD_IOCTL_SVM_FLAG_UNCACHED flag Was leftover from GC 9.4.3 bring up and is currently unused. Drop it for now. Cc: Philip.Yang@amd.com Cc: rajneesh.bhardwaj@amd.com Cc: Felix.Kuehling@amd.com Reviewed-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 +- include/uapi/linux/kfd_ioctl.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 615eab3f78c9..5ff1a5a89d96 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1154,7 +1154,7 @@ svm_range_get_pte_flags(struct kfd_node *node, uint64_t pte_flags; bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN); bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; - bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; + bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/ unsigned int mtype_local; if (domain == SVM_RANGE_VRAM_DOMAIN) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 2a9671e1ddb5..2da5c3ad71bd 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -623,8 +623,6 @@ enum kfd_mmio_remap { #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 /* Keep GPU memory mapping always valid as if XNACK is disable */ #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040 -/* Uncached access to memory */ -#define KFD_IOCTL_SVM_FLAG_UNCACHED 0x00000080 /** * kfd_ioctl_svm_op - SVM ioctl operations From 4f98cf2baf9faee5b6f2f7889dad7c0f7686a787 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Wed, 2 Mar 2022 14:30:12 -0500 Subject: [PATCH 740/861] drm/amdkfd: add debug and runtime enable interface Introduce the GPU debug operations interface. For ROCm-GDB to extend the GNU Debugger's ability to inspect the AMD GPU instruction set, provide the necessary interface to allow the debugger to HW debug-mode set and query exceptions per HSA queue, process or device. The runtime_enable interface coordinates exception handling with the HSA runtime. Usage is available in the kern docs at uapi/linux/kfd_ioctl.h. 
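As a rough illustration of the intended flow, a debugger would enable debug mode on a target process using the uapi added below. This is a hypothetical user-space sketch to sit in the debugger's attach path: target_pid and event_fd are placeholders, and error handling is omitted.

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	int kfd_fd = open("/dev/kfd", O_RDWR);
	struct kfd_runtime_info rinfo = {0};
	struct kfd_ioctl_dbg_trap_args args = {0};

	args.pid = target_pid;                 /* placeholder: process to debug */
	args.op = KFD_IOC_DBG_TRAP_ENABLE;
	args.enable.exception_mask = KFD_EC_MASK(EC_PROCESS_RUNTIME);
	args.enable.rinfo_ptr = (uint64_t)(uintptr_t)&rinfo;
	args.enable.rinfo_size = sizeof(rinfo);
	args.enable.dbg_fd = event_fd;         /* placeholder: fd KFD signals on exceptions */

	int ret = ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);

On success, KFD copies its saved kfd_runtime_info back through rinfo_ptr; the same kfd_ioctl_dbg_trap_args struct, with a different op and union member, drives every other operation in kfd_dbg_trap_operations.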
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 48 ++ include/uapi/linux/kfd_ioctl.h | 668 ++++++++++++++++++++++- 2 files changed, 715 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a9efff94390b..00e34125987c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2729,6 +2729,48 @@ static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data) return ret; } +static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data) +{ + return 0; +} + +static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data) +{ + struct kfd_ioctl_dbg_trap_args *args = data; + int r = 0; + + if (sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Debugging does not support sched_policy %i", sched_policy); + return -EINVAL; + } + + switch (args->op) { + case KFD_IOC_DBG_TRAP_ENABLE: + case KFD_IOC_DBG_TRAP_DISABLE: + case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT: + case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED: + case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: + case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE: + case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: + case KFD_IOC_DBG_TRAP_RESUME_QUEUES: + case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: + case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: + case KFD_IOC_DBG_TRAP_SET_FLAGS: + case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT: + case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: + case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: + case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: + pr_warn("Debugging not supported yet\n"); + r = -EACCES; + break; + default: + pr_err("Invalid option: %i\n", args->op); + r = -EINVAL; + } + + return r; +} + #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ .cmd_drv = 0, .name = #ioctl} @@ -2841,6 +2883,12 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF, kfd_ioctl_export_dmabuf, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE, + kfd_ioctl_runtime_enable, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP, + kfd_ioctl_set_debug_trap, 0), }; #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 2da5c3ad71bd..32913d674d38 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -110,6 +110,32 @@ struct kfd_ioctl_get_available_memory_args { __u32 pad; }; +struct kfd_dbg_device_info_entry { + __u64 exception_status; + __u64 lds_base; + __u64 lds_limit; + __u64 scratch_base; + __u64 scratch_limit; + __u64 gpuvm_base; + __u64 gpuvm_limit; + __u32 gpu_id; + __u32 location_id; + __u32 vendor_id; + __u32 device_id; + __u32 revision_id; + __u32 subsystem_vendor_id; + __u32 subsystem_device_id; + __u32 fw_version; + __u32 gfx_target_version; + __u32 simd_count; + __u32 max_waves_per_simd; + __u32 array_count; + __u32 simd_arrays_per_engine; + __u32 num_xcc; + __u32 capability; + __u32 debug_prop; +}; + /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ #define KFD_IOC_CACHE_POLICY_COHERENT 0 #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1 @@ -773,6 +799,640 @@ struct kfd_ioctl_set_xnack_mode_args { __s32 xnack_enabled; }; +/* Wave launch override modes */ +enum kfd_dbg_trap_override_mode { + KFD_DBG_TRAP_OVERRIDE_OR = 0, + KFD_DBG_TRAP_OVERRIDE_REPLACE = 
1 +}; + +/* Wave launch overrides */ +enum kfd_dbg_trap_mask { + KFD_DBG_TRAP_MASK_FP_INVALID = 1, + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2, + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4, + KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8, + KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16, + KFD_DBG_TRAP_MASK_FP_INEXACT = 32, + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64, + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128, + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256, + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30), + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31) +}; + +/* Wave launch modes */ +enum kfd_dbg_trap_wave_launch_mode { + KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0, + KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1, + KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3 +}; + +/* Address watch modes */ +enum kfd_dbg_trap_address_watch_mode { + KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0, + KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1, + KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2, + KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3 +}; + +/* Additional wave settings */ +enum kfd_dbg_trap_flags { + KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1, +}; + +/* Trap exceptions */ +enum kfd_dbg_trap_exception_code { + EC_NONE = 0, + /* per queue */ + EC_QUEUE_WAVE_ABORT = 1, + EC_QUEUE_WAVE_TRAP = 2, + EC_QUEUE_WAVE_MATH_ERROR = 3, + EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4, + EC_QUEUE_WAVE_MEMORY_VIOLATION = 5, + EC_QUEUE_WAVE_APERTURE_VIOLATION = 6, + EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16, + EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17, + EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18, + EC_QUEUE_PACKET_RESERVED = 19, + EC_QUEUE_PACKET_UNSUPPORTED = 20, + EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21, + EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22, + EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23, + EC_QUEUE_PREEMPTION_ERROR = 30, + EC_QUEUE_NEW = 31, + /* per device */ + EC_DEVICE_QUEUE_DELETE = 32, + EC_DEVICE_MEMORY_VIOLATION = 33, + EC_DEVICE_RAS_ERROR = 34, + EC_DEVICE_FATAL_HALT = 35, + EC_DEVICE_NEW = 36, + /* per process */ + EC_PROCESS_RUNTIME = 48, + EC_PROCESS_DEVICE_REMOVE = 49, + EC_MAX +}; + +/* Mask generated by ecode in kfd_dbg_trap_exception_code */ +#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1)) + +/* Masks for exception code type checks below */ +#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | \ + KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | \ + KFD_EC_MASK(EC_QUEUE_NEW)) +#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | \ + KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | \ + KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | \ + KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | \ + KFD_EC_MASK(EC_DEVICE_NEW)) +#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \ + KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE)) + +/* Checks for exception code types for KFD search */ +#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \ + (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE)) 
+#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \ + (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE)) +#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \ + (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS)) + + +/* Runtime enable states */ +enum kfd_dbg_runtime_state { + DEBUG_RUNTIME_STATE_DISABLED = 0, + DEBUG_RUNTIME_STATE_ENABLED = 1, + DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2, + DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3 +}; + +/* Runtime enable status */ +struct kfd_runtime_info { + __u64 r_debug; + __u32 runtime_state; + __u32 ttmp_setup; +}; + +/* Enable modes for runtime enable */ +#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1 +#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2 + +/** + * kfd_ioctl_runtime_enable_args - Arguments for runtime enable + * + * Coordinates debug exception signalling and debug device enablement with runtime. + * + * @r_debug - pointer to user struct for sharing information between ROCr and the debuggger + * @mode_mask - mask to set mode + * KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable + * KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable) + * @capabilities_mask - mask to notify runtime on what KFD supports + * + * Return - 0 on SUCCESS. + * - EBUSY if runtime enable call already pending. + * - EEXIST if user queues already active prior to call. + * If process is debug enabled, runtime enable will enable debug devices and + * wait for debugger process to send runtime exception EC_PROCESS_RUNTIME + * to unblock - see kfd_ioctl_dbg_trap_args. + * + */ +struct kfd_ioctl_runtime_enable_args { + __u64 r_debug; + __u32 mode_mask; + __u32 capabilities_mask; +}; + +/* Queue information */ +struct kfd_queue_snapshot_entry { + __u64 exception_status; + __u64 ring_base_address; + __u64 write_pointer_address; + __u64 read_pointer_address; + __u64 ctx_save_restore_address; + __u32 queue_id; + __u32 gpu_id; + __u32 ring_size; + __u32 queue_type; + __u32 ctx_save_restore_area_size; + __u32 reserved; +}; + +/* Queue status return for suspend/resume */ +#define KFD_DBG_QUEUE_ERROR_BIT 30 +#define KFD_DBG_QUEUE_INVALID_BIT 31 +#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT) +#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT) + +/* Context save area header information */ +struct kfd_context_save_area_header { + struct { + __u32 control_stack_offset; + __u32 control_stack_size; + __u32 wave_state_offset; + __u32 wave_state_size; + } wave_state; + __u32 debug_offset; + __u32 debug_size; + __u64 err_payload_addr; + __u32 err_event_id; + __u32 reserved1; +}; + +/* + * Debug operations + * + * For specifics on usage and return values, see documentation per operation + * below. Otherwise, generic error returns apply: + * - ESRCH if the process to debug does not exist. + * + * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation + * KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior. + * Also returns this error if GPU hardware scheduling is not supported. + * + * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not + * PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow + * clean up of debug mode as long as process is debug enabled. + * + * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when + * AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior. + * + * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call. + * + * - Other errors may be returned when a DBG_HW_OP occurs while the GPU + * is in a fatal state. 
+ * + */ +enum kfd_dbg_trap_operations { + KFD_IOC_DBG_TRAP_ENABLE = 0, + KFD_IOC_DBG_TRAP_DISABLE = 1, + KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2, + KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3, + KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4, /* DBG_HW_OP */ + KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5, /* DBG_HW_OP */ + KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6, /* DBG_HW_OP */ + KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7, /* DBG_HW_OP */ + KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8, /* DBG_HW_OP */ + KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9, /* DBG_HW_OP */ + KFD_IOC_DBG_TRAP_SET_FLAGS = 10, + KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11, + KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12, + KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13, + KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14 +}; + +/** + * kfd_ioctl_dbg_trap_enable_args + * + * Arguments for KFD_IOC_DBG_TRAP_ENABLE. + * + * Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in + * kfd_ioctl_dbg_trap_args to disable debug session. + * + * @exception_mask (IN) - exceptions to raise to the debugger + * @rinfo_ptr (IN) - pointer to runtime info buffer (see kfd_runtime_info) + * @rinfo_size (IN/OUT) - size of runtime info buffer in bytes + * @dbg_fd (IN) - fd the KFD will nofify the debugger with of raised + * exceptions set in exception_mask. + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable. + * Size of kfd_runtime saved by the KFD returned to @rinfo_size. + * - EBADF if KFD cannot get a reference to dbg_fd. + * - EFAULT if KFD cannot copy runtime info to rinfo_ptr. + * - EINVAL if target process is already debug enabled. + * + */ +struct kfd_ioctl_dbg_trap_enable_args { + __u64 exception_mask; + __u64 rinfo_ptr; + __u32 rinfo_size; + __u32 dbg_fd; +}; + +/** + * kfd_ioctl_dbg_trap_send_runtime_event_args + * + * + * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT. + * Raises exceptions to runtime. + * + * @exception_mask (IN) - exceptions to raise to runtime + * @gpu_id (IN) - target device id + * @queue_id (IN) - target queue id + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * - ENODEV if gpu_id not found. + * If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending + * AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args. + * All other exceptions are raised to runtime through err_payload_addr. + * See kfd_context_save_area_header. + */ +struct kfd_ioctl_dbg_trap_send_runtime_event_args { + __u64 exception_mask; + __u32 gpu_id; + __u32 queue_id; +}; + +/** + * kfd_ioctl_dbg_trap_set_exceptions_enabled_args + * + * Arguments for KFD_IOC_SET_EXCEPTIONS_ENABLED + * Set new exceptions to be raised to the debugger. + * + * @exception_mask (IN) - new exceptions to raise the debugger + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + */ +struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args { + __u64 exception_mask; +}; + +/** + * kfd_ioctl_dbg_trap_set_wave_launch_override_args + * + * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE + * Enable HW exceptions to raise trap. + * + * @override_mode (IN) - see kfd_dbg_trap_override_mode + * @enable_mask (IN/OUT) - reference kfd_dbg_trap_mask. + * IN is the override modes requested to be enabled. + * OUT is referenced in Return below. + * @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask. + * IN is the override modes requested for support check. 
+ * OUT is referenced in Return below. + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * Previous enablement is returned in @enable_mask. + * Actual override support is returned in @support_request_mask. + * - EINVAL if override mode is not supported. + * - EACCES if trap support requested is not actually supported. + * i.e. enable_mask (IN) is not a subset of support_request_mask (OUT). + * Otherwise it is considered a generic error (see kfd_dbg_trap_operations). + */ +struct kfd_ioctl_dbg_trap_set_wave_launch_override_args { + __u32 override_mode; + __u32 enable_mask; + __u32 support_request_mask; + __u32 pad; +}; + +/** + * kfd_ioctl_dbg_trap_set_wave_launch_mode_args + * + * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE + * Set wave launch mode. + * + * @mode (IN) - see kfd_dbg_trap_wave_launch_mode + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + */ +struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args { + __u32 launch_mode; + __u32 pad; +}; + +/** + * kfd_ioctl_dbg_trap_suspend_queues_ags + * + * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES + * Suspend queues. + * + * @exception_mask (IN) - raised exceptions to clear + * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id) + * to suspend + * @num_queues (IN) - number of queues to suspend in @queue_array_ptr + * @grace_period (IN) - wave time allowance before preemption + * per 1K GPU clock cycle unit + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Destruction of a suspended queue is blocked until the queue is + * resumed. This allows the debugger to access queue information and + * the its context save area without running into a race condition on + * queue destruction. + * Automatically copies per queue context save area header information + * into the save area base + * (see kfd_queue_snapshot_entry and kfd_context_save_area_header). + * + * Return - Number of queues suspended on SUCCESS. + * . KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked + * for each queue id in @queue_array_ptr array reports unsuccessful + * suspend reason. + * KFD_DBG_QUEUE_ERROR_MASK = HW failure. + * KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or + * is being destroyed. + */ +struct kfd_ioctl_dbg_trap_suspend_queues_args { + __u64 exception_mask; + __u64 queue_array_ptr; + __u32 num_queues; + __u32 grace_period; +}; + +/** + * kfd_ioctl_dbg_trap_resume_queues_args + * + * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES + * Resume queues. + * + * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id) + * to resume + * @num_queues (IN) - number of queues to resume in @queue_array_ptr + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - Number of queues resumed on SUCCESS. + * KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask + * for each queue id in @queue_array_ptr array reports unsuccessful + * resume reason. + * KFD_DBG_QUEUE_ERROR_MASK = HW failure. + * KFD_DBG_QUEUE_INVALID_MASK = queue does not exist. + */ +struct kfd_ioctl_dbg_trap_resume_queues_args { + __u64 queue_array_ptr; + __u32 num_queues; + __u32 pad; +}; + +/** + * kfd_ioctl_dbg_trap_set_node_address_watch_args + * + * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH + * Sets address watch for device. 
+ * + * @address (IN) - watch address to set + * @mode (IN) - see kfd_dbg_trap_address_watch_mode + * @mask (IN) - watch address mask + * @gpu_id (IN) - target gpu to set watch point + * @id (OUT) - watch id allocated + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * Allocated watch ID returned to @id. + * - ENODEV if gpu_id not found. + * - ENOMEM if watch IDs can be allocated + */ +struct kfd_ioctl_dbg_trap_set_node_address_watch_args { + __u64 address; + __u32 mode; + __u32 mask; + __u32 gpu_id; + __u32 id; +}; + +/** + * kfd_ioctl_dbg_trap_clear_node_address_watch_args + * + * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH + * Clear address watch for device. + * + * @gpu_id (IN) - target device to clear watch point + * @id (IN) - allocated watch id to clear + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * - ENODEV if gpu_id not found. + * - EINVAL if watch ID has not been allocated. + */ +struct kfd_ioctl_dbg_trap_clear_node_address_watch_args { + __u32 gpu_id; + __u32 id; +}; + +/** + * kfd_ioctl_dbg_trap_set_flags_args + * + * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS + * Sets flags for wave behaviour. + * + * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * - EACCESS if any debug device does not allow flag options. + */ +struct kfd_ioctl_dbg_trap_set_flags_args { + __u32 flags; + __u32 pad; +}; + +/** + * kfd_ioctl_dbg_trap_query_debug_event_args + * + * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT + * + * Find one or more raised exceptions. This function can return multiple + * exceptions from a single queue or a single device with one call. To find + * all raised exceptions, this function must be called repeatedly until it + * returns -EAGAIN. Returned exceptions can optionally be cleared by + * setting the corresponding bit in the @exception_mask input parameter. + * However, clearing an exception prevents retrieving further information + * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO. + * + * @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT) + * @gpu_id (OUT) - gpu id of exceptions raised + * @queue_id (OUT) - queue id of exceptions raised + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on raised exception found + * Raised exceptions found are returned in @exception mask + * with reported source id returned in @gpu_id or @queue_id. + * - EAGAIN if no raised exception has been found + */ +struct kfd_ioctl_dbg_trap_query_debug_event_args { + __u64 exception_mask; + __u32 gpu_id; + __u32 queue_id; +}; + +/** + * kfd_ioctl_dbg_trap_query_exception_info_args + * + * Arguments KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO + * Get additional info on raised exception. + * + * @info_ptr (IN) - pointer to exception info buffer to copy to + * @info_size (IN/OUT) - exception info buffer size (bytes) + * @source_id (IN) - target gpu or queue id + * @exception_code (IN) - target exception + * @clear_exception (IN) - clear raised @exception_code exception + * (0 = false, 1 = true) + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT) + * bytes of memory exception data to @info_ptr. + * If @exception_code is EC_PROCESS_RUNTIME, copy saved + * kfd_runtime_info to @info_ptr. + * Actual required @info_ptr size (bytes) is returned in @info_size. 
+ */ +struct kfd_ioctl_dbg_trap_query_exception_info_args { + __u64 info_ptr; + __u32 info_size; + __u32 source_id; + __u32 exception_code; + __u32 clear_exception; +}; + +/** + * kfd_ioctl_dbg_trap_get_queue_snapshot_args + * + * Arguments KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT + * Get queue information. + * + * @exception_mask (IN) - exceptions raised to clear + * @snapshot_buf_ptr (IN) - queue snapshot entry buffer (see kfd_queue_snapshot_entry) + * @num_queues (IN/OUT) - number of queue snapshot entries + * The debugger specifies the size of the array allocated in @num_queues. + * KFD returns the number of queues that actually existed. If this is + * larger than the size specified by the debugger, KFD will not overflow + * the array allocated by the debugger. + * + * @entry_size (IN/OUT) - size per entry in bytes + * The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in + * @entry_size. KFD returns the number of bytes actually populated per + * entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine, + * which fields in struct kfd_queue_snapshot_entry are valid. This allows + * growing the ABI in a backwards compatible manner. + * Note that entry_size(IN) should still be used to stride the snapshot buffer in the + * event that it's larger than actual kfd_queue_snapshot_entry. + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN) + * into @snapshot_buf_ptr if @num_queues(IN) > 0. + * Otherwise return @num_queues(OUT) queue snapshot entries that exist. + */ +struct kfd_ioctl_dbg_trap_queue_snapshot_args { + __u64 exception_mask; + __u64 snapshot_buf_ptr; + __u32 num_queues; + __u32 entry_size; +}; + +/** + * kfd_ioctl_dbg_trap_get_device_snapshot_args + * + * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT + * Get device information. + * + * @exception_mask (IN) - exceptions raised to clear + * @snapshot_buf_ptr (IN) - pointer to snapshot buffer (see kfd_dbg_device_info_entry) + * @num_devices (IN/OUT) - number of debug devices to snapshot + * The debugger specifies the size of the array allocated in @num_devices. + * KFD returns the number of devices that actually existed. If this is + * larger than the size specified by the debugger, KFD will not overflow + * the array allocated by the debugger. + * + * @entry_size (IN/OUT) - size per entry in bytes + * The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in + * @entry_size. KFD returns the number of bytes actually populated. The + * debugger should use KFD_IOCTL_MINOR_VERSION to determine, which fields + * in struct kfd_dbg_device_info_entry are valid. This allows growing the + * ABI in a backwards compatible manner. + * Note that entry_size(IN) should still be used to stride the snapshot buffer in the + * event that it's larger than actual kfd_dbg_device_info_entry. + * + * Generic errors apply (see kfd_dbg_trap_operations). + * Return - 0 on SUCCESS. + * Copies @num_devices(IN) device snapshot entries of size @entry_size(IN) + * into @snapshot_buf_ptr if @num_devices(IN) > 0. + * Otherwise return @num_devices(OUT) queue snapshot entries that exist. + */ +struct kfd_ioctl_dbg_trap_device_snapshot_args { + __u64 exception_mask; + __u64 snapshot_buf_ptr; + __u32 num_devices; + __u32 entry_size; +}; + +/** + * kfd_ioctl_dbg_trap_args + * + * Arguments to debug target process. 
+ * + * @pid - target process to debug + * @op - debug operation (see kfd_dbg_trap_operations) + * + * @op determines which union struct args to use. + * Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct. + */ +struct kfd_ioctl_dbg_trap_args { + __u32 pid; + __u32 op; + + union { + struct kfd_ioctl_dbg_trap_enable_args enable; + struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event; + struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled; + struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override; + struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode; + struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues; + struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues; + struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch; + struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch; + struct kfd_ioctl_dbg_trap_set_flags_args set_flags; + struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event; + struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info; + struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot; + struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot; + }; +}; + #define AMDKFD_IOCTL_BASE 'K' #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) @@ -887,7 +1547,13 @@ struct kfd_ioctl_set_xnack_mode_args { #define AMDKFD_IOC_EXPORT_DMABUF \ AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args) +#define AMDKFD_IOC_RUNTIME_ENABLE \ + AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args) + +#define AMDKFD_IOC_DBG_TRAP \ + AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x25 +#define AMDKFD_COMMAND_END 0x27 #endif From d230f1bfe7a1977565ce1e2804ddb7b7a3d911ff Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 25 Mar 2022 12:39:06 -0400 Subject: [PATCH 741/861] drm/amdkfd: display debug capabilities Expose debug capabilities in the KFD topology node's HSA capabilities and debug properties flags. Ensure correct capabilities are exposed based on firmware support. Flag definitions can be referenced in uapi/linux/kfd_sysfs.h. 
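For illustration, these capability bits are consumed from userspace by
parsing a node's sysfs properties file. The following is a hedged sketch
rather than code from this series: the helper, the path handling, and the
parsing are assumptions, while the flag values match the kfd_sysfs.h hunk
below.

/* Hypothetical userspace check against e.g.
 * /sys/class/kfd/kfd/topology/nodes/0/properties ("name value" lines).
 */
#include <stdio.h>
#include <string.h>

#define HSA_CAP_TRAP_DEBUG_SUPPORT		0x00008000
#define HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED	0x20000000

static int node_has_trap_debug(const char *props_path)
{
	char name[64];
	unsigned long long val, cap = 0;
	FILE *f = fopen(props_path, "r");

	if (!f)
		return 0;
	while (fscanf(f, "%63s %llu", name, &val) == 2)
		if (!strcmp(name, "capability"))
			cap = val;
	fclose(f);
	return (cap & HSA_CAP_TRAP_DEBUG_SUPPORT) &&
	       (cap & HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED);
}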
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 101 ++++++++++++++++++++-- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 6 ++ include/uapi/linux/kfd_sysfs.h | 15 ++++ 3 files changed, 117 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 8302d8967158..3def25b2bdbb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -535,6 +535,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd->mec_fw_version); sysfs_show_32bit_prop(buffer, offs, "capability", dev->node_props.capability); + sysfs_show_64bit_prop(buffer, offs, "debug_prop", + dev->node_props.debug_prop); sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", @@ -1857,6 +1859,97 @@ err: return res; } +static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *dev) +{ + bool firmware_supported = true; + + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) { + firmware_supported = + (dev->gpu->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 9; + goto out; + } + + /* + * Note: Any unlisted devices here are assumed to support exception handling. + * Add additional checks here as needed. + */ + switch (KFD_GC_VERSION(dev->gpu)) { + case IP_VERSION(9, 0, 1): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 459 + 32768; + break; + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 0): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 459; + break; + case IP_VERSION(9, 4, 1): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 60; + break; + case IP_VERSION(9, 4, 2): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 51; + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 1): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 144; + break; + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 89; + break; + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 3): + firmware_supported = false; + break; + default: + break; + } + +out: + if (firmware_supported) + dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED; +} + +static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) +{ + dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << + HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & + HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); + + dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT | + HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED | + HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED; + + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { + dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 | + HSA_DBG_WATCH_ADDR_MASK_HI_BIT; + + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 4, 2)) + dev->node_props.debug_prop |= + HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; + else + dev->node_props.capability |= + HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; + } else { + dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | + HSA_DBG_WATCH_ADDR_MASK_HI_BIT; + + if 
(KFD_GC_VERSION(dev->gpu) < IP_VERSION(11, 0, 0))
+			dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
+		else
+			dev->node_props.capability |=
+				HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
+	}
+
+	kfd_topology_set_dbg_firmware_support(dev);
+}
+
 int kfd_topology_add_device(struct kfd_node *gpu)
 {
 	uint32_t gpu_id;
@@ -1967,13 +2060,11 @@ int kfd_topology_add_device(struct kfd_node *gpu)
 			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
 		break;
 	default:
-		if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1))
-			dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
-				HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
-				HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
-		else
+		if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 0, 1))
 			WARN(1, "Unexpected ASIC family %u",
 			     dev->gpu->adev->asic_type);
+		else
+			kfd_topology_set_capabilities(dev);
 	}
 
 	/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 3b8afb6aba79..cba2cd5ed9d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -31,6 +31,11 @@
 
 #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32
 
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9	6
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10	7
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT \
+			(29 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT)
+
 struct kfd_node_properties {
 	uint64_t hive_id;
 	uint32_t cpu_cores_count;
@@ -42,6 +47,7 @@ struct kfd_node_properties {
 	uint32_t cpu_core_id_base;
 	uint32_t simd_id_base;
 	uint32_t capability;
+	uint64_t debug_prop;
 	uint32_t max_waves_per_simd;
 	uint32_t lds_size_in_kb;
 	uint32_t gds_size_in_kb;
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
index 3e330f368917..a51b7331e0b4 100644
--- a/include/uapi/linux/kfd_sysfs.h
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -43,6 +43,11 @@
 #define HSA_CAP_DOORBELL_TYPE_2_0		0x2
 #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP		0x00004000
 
+#define HSA_CAP_TRAP_DEBUG_SUPPORT			0x00008000
+#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED	0x00010000
+#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED		0x00020000
+#define HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED	0x00040000
+
 /* Old buggy user mode depends on this being 0 */
 #define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED	0x00080000
 
@@ -53,8 +58,18 @@
 #define HSA_CAP_SRAM_EDCSUPPORTED		0x04000000
 #define HSA_CAP_SVMAPI_SUPPORTED		0x08000000
 #define HSA_CAP_FLAGS_COHERENTHOSTACCESS	0x10000000
+#define HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED	0x20000000
 #define HSA_CAP_RESERVED			0xe00f8000
 
+/* debug_prop bits in node properties */
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK	0x0000000f
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT	0
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_MASK	0x000003f0
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT	4
+#define HSA_DBG_DISPATCH_INFO_ALWAYS_VALID	0x00000400
+#define HSA_DBG_WATCHPOINTS_EXCLUSIVE		0x00000800
+#define HSA_DBG_RESERVED			0xfffffffffffff000ull
+
 /* Heap types in memory properties */
 #define HSA_MEM_HEAP_TYPE_SYSTEM	0
 #define HSA_MEM_HEAP_TYPE_FB_PUBLIC	1
From 0ab2d7532b05a3e7c06fd3b0c8bd6b46c1dfb508 Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Fri, 25 Mar 2022 14:55:30 -0400
Subject: [PATCH 742/861] drm/amdkfd: prepare per-process debug enable and
 disable

The ROCm debugger will attach via ptrace to a process it wants to debug
and will expect the KFD to prepare a process for the target PID,
whether the target PID has opened the KFD device or not.

This patch explicitly handles this requirement.
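To make the expected calling sequence concrete, here is a hedged
debugger-side sketch (not code from this series): the waitpid handling,
error paths, and event-pipe convention are illustrative assumptions,
while the struct, field, and ioctl names are the kfd_ioctl.h additions
from the previous patch.

/* Hypothetical debugger flow: become the ptrace parent, then enable. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

static int kfd_debug_attach(pid_t target_pid)
{
	struct kfd_ioctl_dbg_trap_args args = {0};
	int ev_fds[2];	/* debugger polls ev_fds[0] for exception events */
	int kfd_fd = open("/dev/kfd", O_RDWR);

	if (kfd_fd < 0 || pipe(ev_fds))
		return -1;

	/* KFD checks that the caller is the target's ptrace parent. */
	if (ptrace(PTRACE_ATTACH, target_pid, NULL, NULL))
		return -1;

	args.pid = target_pid;
	args.op = KFD_IOC_DBG_TRAP_ENABLE;
	args.enable.dbg_fd = ev_fds[1];		/* FIFO handed to KFD to manage */
	args.enable.exception_mask = ~0ULL;	/* subscribe to all exceptions */

	return ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
}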
Further HW mode setting and runtime coordination requirements will be
handled in subsequent patches.

In the case where the target process has not opened the KFD device, a
new KFD process must be created for the target PID. Neither the
debugger nor the target process will have acquired any VMs in this
case, so handle process restoration to account for that correctly.

To coordinate with the HSA runtime, the debugger must be aware of the
target process' runtime enablement status and will copy the runtime
status information into the debugged KFD process for later query.

On enablement, the debugger will subscribe to a set of exceptions where
each exception event will notify the debugger through a pollable FIFO
file descriptor that the debugger provides to the KFD to manage.

Finally, on process termination of either the debugger or the target,
debugging must be disabled if that has not already been done.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/Makefile           |   3 +-
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c      | 102 +++++++++++++++++-
 drivers/gpu/drm/amd/amdkfd/kfd_debug.c        |  80 ++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_debug.h        |  32 ++++++
 .../drm/amd/amdkfd/kfd_device_queue_manager.c |  26 +++--
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  31 +++++-
 drivers/gpu/drm/amd/amdkfd/kfd_process.c      |  60 +++++++----
 7 files changed, 304 insertions(+), 30 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_debug.c
 create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_debug.h

diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index e758c2a24cd0..747754428073 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -55,7 +55,8 @@ AMDKFD_FILES	:= $(AMDKFD_PATH)/kfd_module.o \
 		$(AMDKFD_PATH)/kfd_int_process_v9.o \
 		$(AMDKFD_PATH)/kfd_int_process_v11.o \
 		$(AMDKFD_PATH)/kfd_smi_events.o \
-		$(AMDKFD_PATH)/kfd_crat.o
+		$(AMDKFD_PATH)/kfd_crat.o \
+		$(AMDKFD_PATH)/kfd_debug.o
 
 ifneq ($(CONFIG_AMD_IOMMU_V2),)
 AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 00e34125987c..ee086a0a46df 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -44,6 +44,7 @@
 #include "amdgpu_amdkfd.h"
 #include "kfd_smi_events.h"
 #include "amdgpu_dma_buf.h"
+#include "kfd_debug.h"
 
 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
 static int kfd_open(struct inode *, struct file *);
@@ -142,10 +143,15 @@ static int kfd_open(struct inode *inode, struct file *filep)
 		return -EPERM;
 	}
 
-	process = kfd_create_process(filep);
+	process = kfd_create_process(current);
 	if (IS_ERR(process))
 		return PTR_ERR(process);
 
+	if (kfd_process_init_cwsr_apu(process, filep)) {
+		kfd_unref_process(process);
+		return -EFAULT;
+	}
+
 	/* filep now owns the reference returned by kfd_create_process */
 	filep->private_data = process;
 
@@ -2737,6 +2743,10 @@ static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
 static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
 {
 	struct kfd_ioctl_dbg_trap_args *args = data;
+	struct task_struct *thread = NULL;
+	struct mm_struct *mm = NULL;
+	struct pid *pid = NULL;
+	struct kfd_process *target = NULL;
 	int r = 0;
 
 	if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
@@ -2744,9 +2754,81 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
 		return -EINVAL;
 	}
 
+	pid =
find_get_pid(args->pid); + if (!pid) { + pr_debug("Cannot find pid info for %i\n", args->pid); + r = -ESRCH; + goto out; + } + + thread = get_pid_task(pid, PIDTYPE_PID); + if (!thread) { + r = -ESRCH; + goto out; + } + + mm = get_task_mm(thread); + if (!mm) { + r = -ESRCH; + goto out; + } + + if (args->op == KFD_IOC_DBG_TRAP_ENABLE) { + bool create_process; + + rcu_read_lock(); + create_process = thread && thread != current && ptrace_parent(thread) == current; + rcu_read_unlock(); + + target = create_process ? kfd_create_process(thread) : + kfd_lookup_process_by_pid(pid); + } else { + target = kfd_lookup_process_by_pid(pid); + } + + if (!target) { + pr_debug("Cannot find process PID %i to debug\n", args->pid); + r = -ESRCH; + goto out; + } + + /* Check if target is still PTRACED. */ + rcu_read_lock(); + if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE + && ptrace_parent(target->lead_thread) != current) { + pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid); + r = -EPERM; + } + rcu_read_unlock(); + + if (r) + goto out; + + mutex_lock(&target->mutex); + + if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) { + pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op); + r = -EINVAL; + goto unlock_out; + } + switch (args->op) { case KFD_IOC_DBG_TRAP_ENABLE: + if (target != p) + target->debugger_process = p; + + r = kfd_dbg_trap_enable(target, + args->enable.dbg_fd, + (void __user *)args->enable.rinfo_ptr, + &args->enable.rinfo_size); + if (!r) + target->exception_enable_mask = args->enable.exception_mask; + + pr_warn("Debug functions limited\n"); + break; case KFD_IOC_DBG_TRAP_DISABLE: + r = kfd_dbg_trap_disable(target); + break; case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT: case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED: case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: @@ -2760,7 +2842,7 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: - pr_warn("Debugging not supported yet\n"); + pr_warn("Debug op %i not supported yet\n", args->op); r = -EACCES; break; default: @@ -2768,6 +2850,22 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v r = -EINVAL; } +unlock_out: + mutex_unlock(&target->mutex); + +out: + if (thread) + put_task_struct(thread); + + if (mm) + mmput(mm); + + if (pid) + put_pid(pid); + + if (target) + kfd_unref_process(target); + return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c new file mode 100644 index 000000000000..898cc1fe3d13 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -0,0 +1,80 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kfd_debug.h"
+#include <linux/file.h>
+
+int kfd_dbg_trap_disable(struct kfd_process *target)
+{
+	if (!target->debug_trap_enabled)
+		return 0;
+
+	fput(target->dbg_ev_file);
+	target->dbg_ev_file = NULL;
+
+	if (target->debugger_process) {
+		atomic_dec(&target->debugger_process->debugged_process_count);
+		target->debugger_process = NULL;
+	}
+
+	target->debug_trap_enabled = false;
+	kfd_unref_process(target);
+
+	return 0;
+}
+
+int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
+			void __user *runtime_info, uint32_t *runtime_size)
+{
+	struct file *f;
+	uint32_t copy_size;
+	int r = 0;
+
+	if (target->debug_trap_enabled)
+		return -EALREADY;
+
+	copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info));
+
+	f = fget(fd);
+	if (!f) {
+		pr_err("Failed to get file for (%i)\n", fd);
+		return -EBADF;
+	}
+
+	target->dbg_ev_file = f;
+
+	/* We already hold the process reference but hold another one for the
+	 * debug session.
+	 */
+	kref_get(&target->ref);
+	target->debug_trap_enabled = true;
+
+	if (target->debugger_process)
+		atomic_inc(&target->debugger_process->debugged_process_count);
+
+	if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size))
+		r = -EFAULT;
+
+	*runtime_size = sizeof(target->runtime_info);
+
+	return r;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
new file mode 100644
index 000000000000..a8abfe2a0a14
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_DEBUG_EVENTS_H_INCLUDED
+#define KFD_DEBUG_EVENTS_H_INCLUDED
+
+#include "kfd_priv.h"
+
+int kfd_dbg_trap_disable(struct kfd_process *target);
+int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
+			void __user *runtime_info,
+			uint32_t *runtime_info_size);
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 80cddb46657f..2baa0781eafc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1006,6 +1006,14 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 		goto out;
 
 	pdd = qpd_to_pdd(qpd);
+
+	/* The debugger creates processes that temporarily have not acquired
+	 * VMs on all devices, and the debugger process itself has no VMs.
+	 * Skip queue eviction on process eviction.
+	 */
+	if (!pdd->drm_priv)
+		goto out;
+
 	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
 			    pdd->process->pasid);
 
@@ -1127,13 +1135,10 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 {
 	struct queue *q;
 	struct kfd_process_device *pdd;
-	uint64_t pd_base;
 	uint64_t eviction_duration;
 	int retval = 0;
 
 	pdd = qpd_to_pdd(qpd);
-	/* Retrieve PD base */
-	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
 
 	dqm_lock(dqm);
 	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -1143,12 +1148,19 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 		goto out;
 	}
 
+	/* The debugger creates processes that temporarily have not acquired
+	 * VMs on all devices, and the debugger process itself has no VMs.
+	 * Skip queue restore on process restore.
+	 */
+	if (!pdd->drm_priv)
+		goto vm_not_acquired;
+
 	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
 			    pdd->process->pasid);
 
 	/* Update PD Base in QPD */
-	qpd->page_table_base = pd_base;
-	pr_debug("Updated PD address to 0x%llx\n", pd_base);
+	qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
+	pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);
 
 	/* activate all active queues on the qpd */
 	list_for_each_entry(q, &qpd->queues_list, list) {
@@ -1171,9 +1183,11 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 	if (!dqm->dev->kfd->shared_resources.enable_mes)
 		retval = execute_queues_cpsch(dqm,
 					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
-	qpd->evicted = 0;
+
 	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
 	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
+vm_not_acquired:
+	qpd->evicted = 0;
 out:
 	dqm_unlock(dqm);
 	return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 3bd222e8f6c3..1b272f879b4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -920,11 +920,33 @@ struct kfd_process {
 	 */
 	unsigned long last_restore_timestamp;
 
+	/* Indicates the process is debug-attached with a reserved vmid. */
+	bool debug_trap_enabled;
+
+	/* per-process, per-device debug event fd file */
+	struct file *dbg_ev_file;
+
+	/* If the process is a kfd debugger, we need to know so we can clean
+	 * up at exit time. If a process enables debugging on itself, it does
+	 * its own clean-up, so we don't set the flag here. We track this by
+	 * counting the number of processes this process is debugging.
+	 */
+	atomic_t debugged_process_count;
+
+	/* If the process is being debugged, this is the debugger process */
+	struct kfd_process *debugger_process;
+
 	/* Kobj for our procfs */
 	struct kobject *kobj;
 	struct kobject *kobj_queues;
 	struct attribute attr_pasid;
 
+	/* Keep track of cwsr init */
+	bool has_cwsr;
+
+	/* Exception code enable mask and status */
+	uint64_t exception_enable_mask;
+
 	/* shared virtual memory registered by this process */
 	struct svm_range_list svms;
 
@@ -933,6 +955,10 @@ struct kfd_process {
 	atomic_t poison;
 	/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
 	bool queues_paused;
+
+	/* Tracks runtime enable status */
+	struct kfd_runtime_info runtime_info;
+
 };
 
 #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -963,7 +989,7 @@ bool kfd_dev_is_large_bar(struct kfd_node *dev);
 int kfd_process_create_wq(void);
 void kfd_process_destroy_wq(void);
 void kfd_cleanup_processes(void);
-struct kfd_process *kfd_create_process(struct file *filep);
+struct kfd_process *kfd_create_process(struct task_struct *thread);
 struct kfd_process *kfd_get_process(const struct task_struct *task);
 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
@@ -1108,6 +1134,9 @@ void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
 				  uint64_t tba_addr,
 				  uint64_t tma_addr);
 
+/* CWSR initialization */
+int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);
+
 /* CRIU */
 /*
  * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of the CRIU private
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7f7d1378a2f8..ef67f5e37c6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -44,6 +44,7 @@ struct mm_struct;
 #include "kfd_iommu.h"
 #include "kfd_svm.h"
 #include "kfd_smi_events.h"
+#include "kfd_debug.h"
 
 /*
  * List of struct kfd_process (field kfd_process).
@@ -69,7 +70,6 @@ static struct kfd_process *find_process(const struct task_struct *thread,
 					bool ref);
 static void kfd_process_ref_release(struct kref *ref);
 static struct kfd_process *create_process(const struct task_struct *thread);
-static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
 
 static void evict_process_worker(struct work_struct *work);
 static void restore_process_worker(struct work_struct *work);
@@ -798,18 +798,19 @@ static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
 	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
 }
 
-struct kfd_process *kfd_create_process(struct file *filep)
+struct kfd_process *kfd_create_process(struct task_struct *thread)
 {
 	struct kfd_process *process;
-	struct task_struct *thread = current;
 	int ret;
 
-	if (!thread->mm)
+	if (!(thread->mm && mmget_not_zero(thread->mm)))
 		return ERR_PTR(-EINVAL);
 
 	/* Only the pthreads threading model is supported.
 */
-	if (thread->group_leader->mm != thread->mm)
+	if (thread->group_leader->mm != thread->mm) {
+		mmput(thread->mm);
 		return ERR_PTR(-EINVAL);
+	}
 
 	/*
 	 * take kfd processes mutex before starting of process creation
@@ -833,10 +834,6 @@ struct kfd_process *kfd_create_process(struct file *filep)
 	if (IS_ERR(process))
 		goto out;
 
-	ret = kfd_process_init_cwsr_apu(process, filep);
-	if (ret)
-		goto out_destroy;
-
 	if (!procfs.kobj)
 		goto out;
 
@@ -870,16 +867,9 @@ out:
 	if (!IS_ERR(process))
 		kref_get(&process->ref);
 	mutex_unlock(&kfd_processes_mutex);
+	mmput(thread->mm);
 
 	return process;
-
-out_destroy:
-	hash_del_rcu(&process->kfd_processes);
-	mutex_unlock(&kfd_processes_mutex);
-	synchronize_srcu(&kfd_processes_srcu);
-	/* kfd_process_free_notifier will trigger the cleanup */
-	mmu_notifier_put(&process->mmu_notifier);
-	return ERR_PTR(ret);
 }
 
 struct kfd_process *kfd_get_process(const struct task_struct *thread)
@@ -1180,6 +1170,25 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p)
 	/* Indicate to other users that MM is no longer valid */
 	p->mm = NULL;
 
+	kfd_dbg_trap_disable(p);
+
+	if (atomic_read(&p->debugged_process_count) > 0) {
+		struct kfd_process *target;
+		unsigned int temp;
+		int idx = srcu_read_lock(&kfd_processes_srcu);
+
+		hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
+			if (target->debugger_process && target->debugger_process == p) {
+				mutex_lock_nested(&target->mutex, 1);
+				kfd_dbg_trap_disable(target);
+				mutex_unlock(&target->mutex);
+				if (atomic_read(&p->debugged_process_count) == 0)
+					break;
+			}
+		}
+
+		srcu_read_unlock(&kfd_processes_srcu, idx);
+	}
 
 	mmu_notifier_put(&p->mmu_notifier);
 }
@@ -1259,11 +1268,14 @@ void kfd_cleanup_processes(void)
 	mmu_notifier_synchronize();
 }
 
-static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 {
 	unsigned long offset;
 	int i;
 
+	if (p->has_cwsr)
+		return 0;
+
 	for (i = 0; i < p->n_pdds; i++) {
 		struct kfd_node *dev = p->pdds[i]->dev;
 		struct qcm_process_device *qpd = &p->pdds[i]->qpd;
@@ -1292,6 +1304,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
 	}
 
+	p->has_cwsr = true;
+
 	return 0;
 }
 
@@ -1434,6 +1448,10 @@ static struct kfd_process *create_process(const struct task_struct *thread)
 	if (err)
 		goto err_event_init;
 	process->is_32bit_user_mode = in_compat_syscall();
+	process->debug_trap_enabled = false;
+	process->debugger_process = NULL;
+	process->exception_enable_mask = 0;
+	atomic_set(&process->debugged_process_count, 0);
 
 	process->pasid = kfd_pasid_alloc();
 	if (process->pasid == 0) {
@@ -1967,8 +1985,10 @@ static void restore_process_worker(struct work_struct *work)
 	 */
 	p->last_restore_timestamp = get_jiffies_64();
-	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
-						     &p->ef);
+	/* VMs may not have been acquired yet during debugging. */
+	if (p->kgd_process_info)
+		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
+							     &p->ef);
 	if (ret) {
 		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
 			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
From 08ca712270028111b22e4b159d11dbd6b770135e Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Wed, 30 Mar 2022 14:54:16 -0400
Subject: [PATCH 743/861] drm/amdgpu: add kgd hw debug mode setting interface

Introduce the required KGD debug calls that will execute hardware debug
mode setting.
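As orientation for how these hooks are meant to be consumed by the KFD,
here is a hedged caller-side sketch (not code from this series; the
wrapper name and the null-check policy are assumptions, and only the
function-pointer signature matches the interface added below):

/* Hypothetical KFD-side dispatch through the new kfd2kgd hook. */
static uint32_t kfd_dbg_hw_enable(struct kfd_node *dev, uint32_t vmid,
				  bool restore_dbg_registers)
{
	/* ASICs without a debug backend may leave the hook unpopulated. */
	if (!dev->kfd2kgd->enable_debug_trap)
		return 0;

	return dev->kfd2kgd->enable_debug_trap(dev->adev,
					       restore_dbg_registers,
					       vmid);
}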
Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 .../gpu/drm/amd/include/kgd_kfd_interface.h   | 34 +++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 8cb3dbcae3e4..d0df3381539f 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -291,6 +291,40 @@ struct kfd2kgd_calls {
 			uint32_t vmid, uint64_t page_table_base);
 	uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev);
 
+	uint32_t (*enable_debug_trap)(struct amdgpu_device *adev,
+					bool restore_dbg_registers,
+					uint32_t vmid);
+	uint32_t (*disable_debug_trap)(struct amdgpu_device *adev,
+					bool keep_trap_enabled,
+					uint32_t vmid);
+	int (*validate_trap_override_request)(struct amdgpu_device *adev,
+					uint32_t trap_override,
+					uint32_t *trap_mask_supported);
+	uint32_t (*set_wave_launch_trap_override)(struct amdgpu_device *adev,
+					uint32_t vmid,
+					uint32_t trap_override,
+					uint32_t trap_mask_bits,
+					uint32_t trap_mask_request,
+					uint32_t *trap_mask_prev,
+					uint32_t kfd_dbg_trap_cntl_prev);
+	uint32_t (*set_wave_launch_mode)(struct amdgpu_device *adev,
+					uint8_t wave_launch_mode,
+					uint32_t vmid);
+	uint32_t (*set_address_watch)(struct amdgpu_device *adev,
+					uint64_t watch_address,
+					uint32_t watch_address_mask,
+					uint32_t watch_id,
+					uint32_t watch_mode,
+					uint32_t debug_vmid);
+	uint32_t (*clear_address_watch)(struct amdgpu_device *adev,
+					uint32_t watch_id);
+	void (*get_iq_wait_times)(struct amdgpu_device *adev,
+					uint32_t *wait_times);
+	void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
+					uint32_t wait_times,
+					uint32_t grace_period,
+					uint32_t *reg_offset,
+					uint32_t *reg_data);
 	void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
 			int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
 	void (*program_trap_handler_settings)(struct amdgpu_device *adev,
From 4504f14338cdc43586189558113faafa8acb9ffe Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Thu, 31 Mar 2022 12:05:00 -0400
Subject: [PATCH 744/861] drm/amdgpu: setup hw debug registers on driver
 initialization

Add missing debug trap register references and initialize all debug
registers on boot by clearing the hardware exception overrides and the
wave allocation ID index.

The debugger requires that TTMPs 6 & 7 save the dispatch ID to map
waves onto dispatch during compute context inspection.
In order to correctly set this up, set the special reserved CP bit by
default whenever the MQD is initialized.
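Distilled to its effect on the MQD, the change amounts to one bit at
init time. A hedged sketch follows (the helper and macro names are
invented for illustration; the bit position and field come from the
init_mqd() hunks below):

/* Hypothetical named form of the init_mqd() change (see v9_structs.h
 * for struct v9_mqd). Bit 14 asks the CP to populate DISPATCH_PTR so
 * TTMPs 6 & 7 can carry the dispatch ID for the debugger.
 */
#define CP_HQD_DISPATCH_PTR_SETUP_BIT	14	/* assumed name */

static void mqd_request_dispatch_ptr(struct v9_mqd *m)
{
	m->cp_hqd_hq_status0 |= 1 << CP_HQD_DISPATCH_PTR_SETUP_BIT;
}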
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 26 +++++++ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 3 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 5 ++ .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 5 ++ .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 5 ++ .../include/asic_reg/gc/gc_10_1_0_offset.h | 14 ++++ .../include/asic_reg/gc/gc_10_1_0_sh_mask.h | 69 +++++++++++++++++++ .../include/asic_reg/gc/gc_10_3_0_offset.h | 10 +++ .../include/asic_reg/gc/gc_10_3_0_sh_mask.h | 4 ++ .../include/asic_reg/gc/gc_11_0_0_sh_mask.h | 4 ++ 12 files changed, 176 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f7ad883a70fa..be984f8c71c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4825,6 +4825,29 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade #define DEFAULT_SH_MEM_BASES (0x6000) +static void gfx_v10_0_debug_trap_config_init(struct amdgpu_device *adev, + uint32_t first_vmid, + uint32_t last_vmid) +{ + uint32_t data; + uint32_t trap_config_vmid_mask = 0; + int i; + + /* Calculate trap config vmid mask */ + for (i = first_vmid; i < last_vmid; i++) + trap_config_vmid_mask |= (1 << i); + + data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG, + VMID_SEL, trap_config_vmid_mask); + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG, + TRAP_EN, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0); +} + static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) { int i; @@ -4856,6 +4879,9 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); } + + gfx_v10_0_debug_trap_config_init(adev, adev->vm_manager.first_kfd_vmid, + AMDGPU_NUM_VMID); } static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index da21bf868080..690e121d9dda 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1638,6 +1638,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) /* Enable trap for each kfd vmid. 
*/ data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data); } soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 0189e50bd89f..7f17e0061027 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2303,6 +2303,29 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) adev->gfx.config.num_rbs = hweight32(active_rbs); } +static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev, + uint32_t first_vmid, + uint32_t last_vmid) +{ + uint32_t data; + uint32_t trap_config_vmid_mask = 0; + int i; + + /* Calculate trap config vmid mask */ + for (i = first_vmid; i < last_vmid; i++) + trap_config_vmid_mask |= (1 << i); + + data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG, + VMID_SEL, trap_config_vmid_mask); + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG, + TRAP_EN, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0); +} + #define DEFAULT_SH_MEM_BASES (0x6000) static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) { @@ -4602,6 +4625,13 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + gfx_v9_4_2_debug_trap_config_init(adev, + adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID); + else + gfx_v9_0_debug_trap_config_init(adev, + adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 2cc3a7cb1f54..63f6843a069e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -771,6 +771,9 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA0), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA1), 0); } void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 772c09b5821b..eaaa4f4ddaaa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -117,6 +117,11 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | 1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; + /* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the + * DISPATCH_PTR. 
This is required for the kfd debugger.
+	 */
+	m->cp_hqd_hq_scheduler0 = 1 << 14;
+
 	if (q->format == KFD_QUEUE_FORMAT_AQL) {
 		m->cp_hqd_aql_control =
 			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 632344b95d90..3a48bbc589fe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -143,6 +143,11 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
 			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
 			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
 
+	/* Set cp_hqd_hq_status0 bit 14 to 1 to have the CP set up the
+	 * DISPATCH_PTR. This is required for the kfd debugger.
+	 */
+	m->cp_hqd_hq_status0 = 1 << 14;
+
 	/*
 	 * GFX11 RS64 CPFW version >= 509 supports PCIe atomics support
 	 * acknowledgment.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 226132ec3714..b7c95158d4a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -182,6 +182,11 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
 			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
 			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
 
+	/* Set cp_hqd_hq_status0 bit 14 to 1 to have the CP set up the
+	 * DISPATCH_PTR. This is required for the kfd debugger.
+	 */
+	m->cp_hqd_hq_status0 = 1 << 14;
+
 	if (q->format == KFD_QUEUE_FORMAT_AQL)
 		m->cp_hqd_aql_control =
 			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
index 79c41004c0b6..4908044f7409 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
@@ -5194,6 +5194,20 @@
 #define mmSPI_WCL_PIPE_PERCENT_CS6_BASE_IDX		0
 #define mmSPI_WCL_PIPE_PERCENT_CS7			0x1f70
 #define mmSPI_WCL_PIPE_PERCENT_CS7_BASE_IDX		0
+#define mmSPI_GDBG_WAVE_CNTL				0x1f71
+#define mmSPI_GDBG_WAVE_CNTL_BASE_IDX			0
+#define mmSPI_GDBG_TRAP_CONFIG				0x1f72
+#define mmSPI_GDBG_TRAP_CONFIG_BASE_IDX			0
+#define mmSPI_GDBG_TRAP_MASK				0x1f73
+#define mmSPI_GDBG_TRAP_MASK_BASE_IDX			0
+#define mmSPI_GDBG_WAVE_CNTL2				0x1f74
+#define mmSPI_GDBG_WAVE_CNTL2_BASE_IDX			0
+#define mmSPI_GDBG_WAVE_CNTL3				0x1f75
+#define mmSPI_GDBG_WAVE_CNTL3_BASE_IDX			0
+#define mmSPI_GDBG_TRAP_DATA0				0x1f78
+#define mmSPI_GDBG_TRAP_DATA0_BASE_IDX			0
+#define mmSPI_GDBG_TRAP_DATA1				0x1f79
+#define mmSPI_GDBG_TRAP_DATA1_BASE_IDX			0
 #define mmSPI_COMPUTE_QUEUE_RESET			0x1f7b
 #define mmSPI_COMPUTE_QUEUE_RESET_BASE_IDX		0
 #define mmSPI_RESOURCE_RESERVE_CU_0			0x1f7c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index 52043e143067..9b7d219e7954 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -19700,6 +19700,75 @@
 //SPI_WCL_PIPE_PERCENT_CS7
 #define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT		0x0
 #define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK		0x7FL
+//SPI_GDBG_WAVE_CNTL
+#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT		0x0
+#define SPI_GDBG_WAVE_CNTL__STALL_VMID__SHIFT		0x1
+#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK		0x00000001L
+#define SPI_GDBG_WAVE_CNTL__STALL_VMID_MASK		0x0001FFFEL
+//SPI_GDBG_TRAP_CONFIG
+#define SPI_GDBG_TRAP_CONFIG__ME_SEL__SHIFT		0x0
+#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL__SHIFT		0x2
+#define
SPI_GDBG_TRAP_CONFIG__QUEUE_SEL__SHIFT 0x4 +#define SPI_GDBG_TRAP_CONFIG__ME_MATCH__SHIFT 0x7 +#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH__SHIFT 0x8 +#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH__SHIFT 0x9 +#define SPI_GDBG_TRAP_CONFIG__TRAP_EN__SHIFT 0xf +#define SPI_GDBG_TRAP_CONFIG__VMID_SEL__SHIFT 0x10 +#define SPI_GDBG_TRAP_CONFIG__ME_SEL_MASK 0x00000003L +#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL_MASK 0x0000000CL +#define SPI_GDBG_TRAP_CONFIG__QUEUE_SEL_MASK 0x00000070L +#define SPI_GDBG_TRAP_CONFIG__ME_MATCH_MASK 0x00000080L +#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH_MASK 0x00000100L +#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH_MASK 0x00000200L +#define SPI_GDBG_TRAP_CONFIG__TRAP_EN_MASK 0x00008000L +#define SPI_GDBG_TRAP_CONFIG__VMID_SEL_MASK 0xFFFF0000L +//SPI_GDBG_TRAP_MASK +#define SPI_GDBG_TRAP_MASK__EXCP_EN__SHIFT 0x0 +#define SPI_GDBG_TRAP_MASK__REPLACE__SHIFT 0x9 +#define SPI_GDBG_TRAP_MASK__EXCP_EN_MASK 0x01FFL +#define SPI_GDBG_TRAP_MASK__REPLACE_MASK 0x0200L +//SPI_GDBG_WAVE_CNTL2 +#define SPI_GDBG_WAVE_CNTL2__VMID_MASK__SHIFT 0x0 +#define SPI_GDBG_WAVE_CNTL2__MODE__SHIFT 0x10 +#define SPI_GDBG_WAVE_CNTL2__VMID_MASK_MASK 0x0000FFFFL +#define SPI_GDBG_WAVE_CNTL2__MODE_MASK 0x00030000L +//SPI_GDBG_WAVE_CNTL3 +#define SPI_GDBG_WAVE_CNTL3__STALL_PS__SHIFT 0x0 +#define SPI_GDBG_WAVE_CNTL3__STALL_VS__SHIFT 0x1 +#define SPI_GDBG_WAVE_CNTL3__STALL_GS__SHIFT 0x2 +#define SPI_GDBG_WAVE_CNTL3__STALL_HS__SHIFT 0x3 +#define SPI_GDBG_WAVE_CNTL3__STALL_CSG__SHIFT 0x4 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS0__SHIFT 0x5 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS1__SHIFT 0x6 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS2__SHIFT 0x7 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS3__SHIFT 0x8 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS4__SHIFT 0x9 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS5__SHIFT 0xa +#define SPI_GDBG_WAVE_CNTL3__STALL_CS6__SHIFT 0xb +#define SPI_GDBG_WAVE_CNTL3__STALL_CS7__SHIFT 0xc +#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION__SHIFT 0xd +#define SPI_GDBG_WAVE_CNTL3__STALL_MULT__SHIFT 0x1c +#define SPI_GDBG_WAVE_CNTL3__STALL_PS_MASK 0x00000001L +#define SPI_GDBG_WAVE_CNTL3__STALL_VS_MASK 0x00000002L +#define SPI_GDBG_WAVE_CNTL3__STALL_GS_MASK 0x00000004L +#define SPI_GDBG_WAVE_CNTL3__STALL_HS_MASK 0x00000008L +#define SPI_GDBG_WAVE_CNTL3__STALL_CSG_MASK 0x00000010L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS0_MASK 0x00000020L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS1_MASK 0x00000040L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS2_MASK 0x00000080L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS3_MASK 0x00000100L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS4_MASK 0x00000200L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS5_MASK 0x00000400L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS6_MASK 0x00000800L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS7_MASK 0x00001000L +#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION_MASK 0x0FFFE000L +#define SPI_GDBG_WAVE_CNTL3__STALL_MULT_MASK 0x10000000L +//SPI_GDBG_TRAP_DATA0 +#define SPI_GDBG_TRAP_DATA0__DATA__SHIFT 0x0 +#define SPI_GDBG_TRAP_DATA0__DATA_MASK 0xFFFFFFFFL +//SPI_GDBG_TRAP_DATA1 +#define SPI_GDBG_TRAP_DATA1__DATA__SHIFT 0x0 +#define SPI_GDBG_TRAP_DATA1__DATA_MASK 0xFFFFFFFFL //SPI_COMPUTE_QUEUE_RESET #define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0 #define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h index a734abaa91a5..5e15ac14b63c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h @@ -26,6 +26,8 @@ #define 
mmSQ_DEBUG_STS_GLOBAL_BASE_IDX	0
 #define mmSQ_DEBUG_STS_GLOBAL2				0x10B0
 #define mmSQ_DEBUG_STS_GLOBAL2_BASE_IDX			0
+#define mmSQ_DEBUG					0x10B1
+#define mmSQ_DEBUG_BASE_IDX				0
 
 // addressBlock: gc_sdma0_sdma0dec
 // base address: 0x4980
@@ -4853,10 +4855,18 @@
 #define mmSPI_WCL_PIPE_PERCENT_CS3_BASE_IDX		0
 #define mmSPI_GDBG_WAVE_CNTL				0x1f71
 #define mmSPI_GDBG_WAVE_CNTL_BASE_IDX			0
+#define mmSPI_GDBG_TRAP_CONFIG				0x1f72
+#define mmSPI_GDBG_TRAP_CONFIG_BASE_IDX			0
 #define mmSPI_GDBG_TRAP_MASK				0x1f73
 #define mmSPI_GDBG_TRAP_MASK_BASE_IDX			0
 #define mmSPI_GDBG_WAVE_CNTL2				0x1f74
 #define mmSPI_GDBG_WAVE_CNTL2_BASE_IDX			0
+#define mmSPI_GDBG_WAVE_CNTL3				0x1f75
+#define mmSPI_GDBG_WAVE_CNTL3_BASE_IDX			0
+#define mmSPI_GDBG_TRAP_DATA0				0x1f78
+#define mmSPI_GDBG_TRAP_DATA0_BASE_IDX			0
+#define mmSPI_GDBG_TRAP_DATA1				0x1f79
+#define mmSPI_GDBG_TRAP_DATA1_BASE_IDX			0
 #define mmSPI_COMPUTE_QUEUE_RESET			0x1f7b
 #define mmSPI_COMPUTE_QUEUE_RESET_BASE_IDX		0
 #define mmSPI_RESOURCE_RESERVE_CU_0			0x1f7c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index d7a17bae2584..e4ecd6c2d20e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -47907,6 +47907,10 @@
 
 // addressBlock: sqind
+//SQ_DEBUG
+#define SQ_DEBUG__SINGLE_MEMOP_MASK			0x00000001L
+#define SQ_DEBUG__SINGLE_MEMOP__SHIFT			0x00000000
+
 //SQ_DEBUG_STS_GLOBAL
 #define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0_MASK	0x000000ffL
 #define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0__SHIFT	0x00000000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
index 4f08f90856fc..3088a4a13cb5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
@@ -17216,11 +17216,15 @@
 #define SPI_GDBG_PER_VMID_CNTL__TRAP_EN__SHIFT		0x3
 #define SPI_GDBG_PER_VMID_CNTL__EXCP_EN__SHIFT		0x4
 #define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE__SHIFT	0xd
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_START__SHIFT	0xe
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_END__SHIFT	0xf
 #define SPI_GDBG_PER_VMID_CNTL__STALL_VMID_MASK		0x00000001L
 #define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE_MASK	0x00000006L
 #define SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK		0x00000008L
 #define SPI_GDBG_PER_VMID_CNTL__EXCP_EN_MASK		0x00001FF0L
 #define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE_MASK	0x00002000L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_START_MASK	0x00004000L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_END_MASK	0x00008000L
 //SPI_COMPUTE_QUEUE_RESET
 #define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT		0x0
 #define SPI_COMPUTE_QUEUE_RESET__RESET_MASK		0x01L
From 257d7b7be26d83768cb07585480d90e875365d5c Mon Sep 17 00:00:00 2001
From: Mario Limonciello
Date: Tue, 30 May 2023 12:44:30 -0500
Subject: [PATCH 745/861] drm/amd: Make lack of `ACPI_FADT_LOW_POWER_S0` or
 `CONFIG_AMD_PMC` louder during suspend path

Users have reported that s2idle wasn't working on OEM Phoenix systems;
the root cause was that `CONFIG_AMD_PMC` wasn't set in the distribution
kernel config.

To make this more apparent, raise the messaging to err instead of warn.
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217497 Reviewed-by: Alex Deucher Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 73e4434d9b54..385c6acb5728 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1500,14 +1500,14 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) * in that case. */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { - dev_warn_once(adev->dev, + dev_err_once(adev->dev, "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); return false; } #if !IS_ENABLED(CONFIG_AMD_PMC) - dev_warn_once(adev->dev, + dev_err_once(adev->dev, "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); return false; #else From 34941e5dc72daff878267c55a9bdb5e055804953 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 31 May 2023 09:03:27 +0530 Subject: [PATCH 746/861] drm/amd/display: Drop unused DCN_BASE variable in dcn314_resource.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the following W=1 kernel build warning: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn314/dcn314_resource.c:128:29: warning: ‘DCN_BASE’ defined but not used [-Wunused-const-variable=] 128 | static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0, 0, 0 } }, | ^~~~~~~~ Suggested-by: Roman Li Cc: Hamza Mahfooz Cc: Rodrigo Siqueira Cc: Harry Wentland Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Roman Li Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn314/dcn314_resource.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 3592efcc7fae..837884c4f03a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -117,23 +117,6 @@ #define regBIF_BX2_BIOS_SCRATCH_6 0x003e #define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1 -struct IP_BASE_INSTANCE { - unsigned int segment[MAX_SEGMENT]; -}; - -struct IP_BASE { - struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; -}; - -static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0, 0, 0 } } } }; - - #define DC_LOGGER_INIT(logger) enum dcn31_clk_src_array_id { From d3116d9f27b89d363dd528e42fcf4895a15e0c3c Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 31 May 2023 10:08:11 +0800 Subject: [PATCH 747/861] drm/amdkfd: clean up one inconsistent indenting drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_device.c:1036 kgd2kfd_interrupt() warn: inconsistent indenting Signed-off-by: Yang Li Signed-off-by: Felix Kuehling Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 
e84ad1c5ef44..f0ed6e6416c3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1042,7 +1042,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 				is_patched ? patched_ihre : ih_ring_entry)) {
 			kfd_queue_work(node->ih_wq, &node->interrupt_work);
 			spin_unlock_irqrestore(&node->interrupt_lock, flags);
-		return;
+			return;
 		}
 		spin_unlock_irqrestore(&node->interrupt_lock, flags);
 	}
From cde2e087a320bff5d772d82c9fbddaea18daa94a Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Wed, 30 Mar 2022 15:09:11 -0400
Subject: [PATCH 748/861] drm/amdgpu: add gfx9 hw debug mode enable and
 disable calls

Implement the per-device calls to enable or disable HW debug mode for
GFX9 prior to GFX9.4.1. GFX9.4.1 and onward will require their own
enable/disable sequence as follow-on patches.

When hardware debug mode setting is requested, waves will inherit
these settings in the Shader Processor Input's (SPI) Sequencer Global
Block (SQG). This means that the KGD must drain all waves from the SPI
into the SQG (approximately 96 SPI clock cycles) prior to debug mode
setting, to ensure that the order of operations the debugger expects
between debug mode setting requests and wave inheritance of that mode
is upheld.

Also ensure that exception overrides are reset to their original state
prior to debug enable or disable.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 92 +++++++++++++++++++
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h |  9 ++
 2 files changed, 101 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 34bf030f3137..8d7d04704b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -649,6 +649,96 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
 	return 0;
 }
 
+/*
+ * GFX9 helper for wave launch stall requirements on debug trap setting.
+ *
+ * vmid:
+ *   Target VMID to stall/unstall.
+ *
+ * stall:
+ *   0-unstall wave launch (enable), 1-stall wave launch (disable).
+ *   After wavefront launch has been stalled, allocated waves must drain from
+ *   SPI in order for debug trap settings to take effect on those waves.
+ *   This is roughly a ~96 clock cycle wait on SPI where a read on
+ *   SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
+ *   KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required.
+ *
+ *   NOTE: We can afford to clear the entire STALL_VMID field on unstall
+ *   because GFX9.4.1 cannot support multi-process debugging due to trap
+ *   configuration and masking being limited to global scope. Always assume
+ *   single process conditions.
+ */
+#define KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY	3
+void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
+					uint32_t vmid,
+					bool stall)
+{
+	int i;
+	uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+
+	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
+		data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
+							stall ? 1 << vmid : 0);
+	else
+		data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA,
+							stall ?
1 : 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); + + if (!stall) + return; + + for (i = 0; i < KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++) + RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); +} + +/* + * restore_dbg_registers is ignored here but is a general interface requirement + * for devices that support GFXOFF and where the RLC save/restore list + * does not support hw registers for debugging i.e. the driver has to manually + * initialize the debug mode registers after it has disabled GFX off during the + * debug session. + */ +uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid) +{ + mutex_lock(&adev->grbm_idx_mutex); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +/* + * keep_trap_enabled is ignored here but is a general interface requirement + * for devices that support multi-process debugging where the performance + * overhead from trap temporary setup needs to be bypassed when the debug + * session has ended. + */ +uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, + bool keep_trap_enabled, + uint32_t vmid) +{ + mutex_lock(&adev->grbm_idx_mutex); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { @@ -877,6 +967,8 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, + .enable_debug_trap = kgd_gfx_v9_enable_debug_trap, + .disable_debug_trap = kgd_gfx_v9_disable_debug_trap, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index a241299f4fbc..9588ff055393 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -64,3 +64,12 @@ void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id); void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst); +void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev, + uint32_t vmid, + bool stall); +uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid); +uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, + bool keep_trap_enabled, + uint32_t vmid); From 01f648202c5390f4c366793b34c27cddad4ca8d7 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Wed, 30 Mar 2022 15:31:00 -0400 Subject: [PATCH 749/861] drm/amdgpu: add gfx9.4.1 hw debug mode enable and disable calls On GFX9.4.1, the implicit wait count instruction on s_barrier is disabled by default in the driver during normal operation for performance requirements. 
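Stripped of the draining and suspend logic the patch body wraps around
it, the setting itself is a single SQ_CONFIG field. A condensed sketch
of the helper added below (the standalone function name is
illustrative):

/* Condensed from set_barrier_auto_waitcnt() below; locking, KFD
 * suspension, and compute-ring draining are omitted for clarity.
 */
static void sq_config_set_barrier_waitcnt(struct amdgpu_device *adev,
					  bool enable_waitcnt)
{
	uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG));

	data = REG_SET_FIELD(data, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
			     !enable_waitcnt);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG), data);
}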
There is a hardware bug in GFX9.4.1 where, if the implicit wait count instruction after an s_barrier instruction is disabled, any wave that hits an exception may step over the s_barrier when returning from the trap handler; the barrier logic has no way to be aware of this, so other waves wait at the barrier indefinitely, resulting in a shader hang. This bug has been corrected for GFX9.4.2 and onward.

Since the debugger subscribes to hardware exceptions, in order to avoid this bug, the debugger must enable the implicit wait count on s_barrier for a debug session and disable it on detach.

In order to change this setting in the device-global SQ_CONFIG register, the GFX pipeline must be idle. GFX9.4.1 as a compute device will dispatch work either through the compute ring buffers used for image post processing or through the hardware scheduler via the KFD.

Have the KGD suspend and drain the compute ring buffer, then suspend the hardware scheduler and block any future KFD process job requests before changing the implicit wait count setting. Once set, resume all work.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +
 .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 116 ++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 +-
 3 files changed, 121 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8b4a89a6c3fd..74104d46a7be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1046,6 +1046,9 @@ struct amdgpu_device {
 struct pci_saved_state *pci_state;
 pci_channel_state_t pci_channel_state;
+ /* Track auto wait count on s_barrier settings */
+ bool barrier_has_auto_waitcnt;
+
 struct amdgpu_reset_control *reset_cntl;
 uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 4191af5a3f13..d2918e5c0dea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_amdkfd_arcturus.h"
+#include "amdgpu_reset.h"
 #include "sdma0/sdma0_4_2_2_offset.h"
 #include "sdma0/sdma0_4_2_2_sh_mask.h"
 #include "sdma1/sdma1_4_2_2_offset.h"
@@ -48,6 +49,8 @@
 #include "amdgpu_amdkfd_gfx_v9.h"
 #include "gfxhub_v1_0.h"
 #include "mmhub_v9_4.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
 #define HQD_N_REGS 56
 #define DUMP_REG(addr) do { \
@@ -276,6 +279,117 @@ int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
 return 0;
 }
+/*
+ * Helper used to suspend/resume gfx pipe for image post process work to set
+ * barrier behaviour.
+ */
+static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool suspend)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ if (!(ring && ring->sched.thread))
+ continue;
+
+ /* stop scheduler and drain ring. */
+ if (suspend) {
+ drm_sched_stop(&ring->sched, NULL);
+ r = amdgpu_fence_wait_empty(ring);
+ if (r)
+ goto out;
+ } else {
+ drm_sched_start(&ring->sched, false);
+ }
+ }
+
+out:
+ /* return on resume or failure to drain rings.
*/ + if (!suspend || r) + return r; + + return amdgpu_device_ip_wait_for_idle(adev, GC_HWIP); +} + +static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_waitcnt) +{ + uint32_t data; + + WRITE_ONCE(adev->barrier_has_auto_waitcnt, enable_waitcnt); + + if (!down_read_trylock(&adev->reset_domain->sem)) + return; + + amdgpu_amdkfd_suspend(adev, false); + + if (suspend_resume_compute_scheduler(adev, true)) + goto out; + + data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG)); + data = REG_SET_FIELD(data, SQ_CONFIG, DISABLE_BARRIER_WAITCNT, + !enable_waitcnt); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG), data); + +out: + suspend_resume_compute_scheduler(adev, false); + + amdgpu_amdkfd_resume(adev, false); + + up_read(&adev->reset_domain->sem); +} + +/* + * restore_dbg_registers is ignored here but is a general interface requirement + * for devices that support GFXOFF and where the RLC save/restore list + * does not support hw registers for debugging i.e. the driver has to manually + * initialize the debug mode registers after it has disabled GFX off during the + * debug session. + */ +static uint32_t kgd_arcturus_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid) +{ + mutex_lock(&adev->grbm_idx_mutex); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true); + + set_barrier_auto_waitcnt(adev, true); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +/* + * keep_trap_enabled is ignored here but is a general interface requirement + * for devices that support multi-process debugging where the performance + * overhead from trap temporary setup needs to be bypassed when the debug + * session has ended. 
+ */
+static uint32_t kgd_arcturus_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ set_barrier_auto_waitcnt(adev, false);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
 const struct kfd2kgd_calls arcturus_kfd2kgd = {
 .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
 .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -294,6 +408,8 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+ .enable_debug_trap = kgd_arcturus_enable_debug_trap,
+ .disable_debug_trap = kgd_arcturus_disable_debug_trap,
 .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
 .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7f17e0061027..f092a1dbdb56 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2390,8 +2390,8 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
 switch (adev->ip_versions[GC_HWIP][0]) {
 case IP_VERSION(9, 4, 1):
 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
- tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
- DISABLE_BARRIER_WAITCNT, 1);
+ tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
+ !READ_ONCE(adev->barrier_has_auto_waitcnt));
 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
 break;
 default:

From bb13d763f251c28b08d996671c5146a2113fc9e7 Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Fri, 24 Mar 2023 16:19:27 -0400
Subject: [PATCH 750/861] drm/amdkfd: fix kfd_suspend_all_processes

Flush the delayed restore work in kfd_suspend_all_processes instead of cancelling it. Cancelling the work before it runs results in the queues becoming permanently disabled. Flushing the work ensures that the queue suspend/resume state stays balanced.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index ef67f5e37c6f..d75dac92775c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -2014,7 +2014,7 @@ void kfd_suspend_all_processes(void)
 WARN(debug_evictions, "Evicting all processes");
 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
 cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
+ flush_delayed_work(&p->restore_work);
 if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
 pr_err("Failed to suspend process 0x%x\n", p->pasid);

From d13f050fee94a454323f864fb005c4355600cdbd Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Thu, 31 Mar 2022 13:14:01 -0400
Subject: [PATCH 751/861] drm/amdgpu: add gfx10 hw debug mode enable and disable calls

Similar to GFX9 debug devices, set the hardware debug mode by draining the SPI appropriately prior to the mode setting request.

Because GFX10 has waves allocated by the work group boundary and each SE's SPI instances do not communicate, the SPI drain time is much longer. This long drain time will be fixed for GFX11 onwards; a minimal sketch of the shared drain pattern follows.
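As a minimal sketch (illustrative only; the helper name spi_drain_sketch and the latency_reads parameter are invented for the example), the stall-and-drain pattern that both the GFX9 and GFX10 helpers in these patches implement reduces to:

	/* Each RREG32 of SPI_GDBG_WAVE_CNTL costs roughly 32 SPI clocks, so
	 * reading it latency_reads times gives stalled waves
	 * latency_reads * 32 clocks to drain from the SPI
	 * (3 reads ~ 96 clocks on GFX9, 110 reads ~ 3500 clocks on GFX10).
	 */
	static void spi_drain_sketch(struct amdgpu_device *adev, int latency_reads)
	{
		int i;

		for (i = 0; i < latency_reads; i++)
			RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	}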
Also remove a bunch of deprecated misplaced references for GFX10.3. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 96 ++++++++++++ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 28 ++++ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 148 +----------------- 3 files changed, 127 insertions(+), 145 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 7b60268d93c0..240f5006e278 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -21,6 +21,7 @@ */ #include "amdgpu.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_amdkfd_gfx_v10.h" #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" #include "athub/athub_2_0_0_offset.h" @@ -709,6 +710,99 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev, adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); } +/* + * GFX10 helper for wave launch stall requirements on debug trap setting. + * + * vmid: + * Target VMID to stall/unstall. + * + * stall: + * 0-unstall wave launch (enable), 1-stall wave launch (disable). + * After wavefront launch has been stalled, allocated waves must drain from + * SPI in order for debug trap settings to take effect on those waves. + * This is roughly a ~3500 clock cycle wait on SPI where a read on + * SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles. + * KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required. + * + * NOTE: We can afford to clear the entire STALL_VMID field on unstall + * because current GFX10 chips cannot support multi-process debugging due to + * trap configuration and masking being limited to global scope. Always + * assume single process conditions. + * + */ + +#define KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY 110 +static void kgd_gfx_v10_set_wave_launch_stall(struct amdgpu_device *adev, uint32_t vmid, bool stall) +{ + uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); + int i; + + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID, + stall ? 1 << vmid : 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); + + if (!stall) + return; + + for (i = 0; i < KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++) + RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); +} + +uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid) +{ + + mutex_lock(&adev->grbm_idx_mutex); + + kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true); + + /* assume gfx off is disabled for the debug session if rlc restore not supported. 
*/ + if (restore_dbg_registers) { + uint32_t data = 0; + + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG, + VMID_SEL, 1 << vmid); + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG, + TRAP_EN, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0); + + kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; + } + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, + bool keep_trap_enabled, + uint32_t vmid) +{ + mutex_lock(&adev->grbm_idx_mutex); + + kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + static void program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst) @@ -752,5 +846,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = set_vm_context_page_table_base, + .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, + .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, .program_trap_handler_settings = program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h new file mode 100644 index 000000000000..251d61fbde07 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid); +uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, + bool keep_trap_enabled, + uint32_t vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 52d0d35fb58d..8b293f3dcbd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -22,6 +22,7 @@ #include #include "amdgpu.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_amdkfd_gfx_v10.h" #include "gc/gc_10_3_0_offset.h" #include "gc/gc_10_3_0_sh_mask.h" #include "oss/osssys_5_0_0_offset.h" @@ -654,143 +655,6 @@ static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev, unlock_srbm(adev); } -#if 0 -uint32_t enable_debug_trap_v10_3(struct amdgpu_device *adev, - uint32_t trap_debug_wave_launch_mode, - uint32_t vmid) -{ - uint32_t data = 0; - uint32_t orig_wave_cntl_value; - uint32_t orig_stall_vmid; - - mutex_lock(&adev->grbm_idx_mutex); - - orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC, - 0, - mmSPI_GDBG_WAVE_CNTL)); - orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value, - SPI_GDBG_WAVE_CNTL, - STALL_VMID); - - data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); - - data = 0; - WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid); - - mutex_unlock(&adev->grbm_idx_mutex); - - return 0; -} - -uint32_t disable_debug_trap_v10_3(struct amdgpu_device *adev) -{ - mutex_lock(&adev->grbm_idx_mutex); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); - - mutex_unlock(&adev->grbm_idx_mutex); - - return 0; -} - -uint32_t set_wave_launch_trap_override_v10_3(struct amdgpu_device *adev, - uint32_t trap_override, - uint32_t trap_mask) -{ - uint32_t data = 0; - - mutex_lock(&adev->grbm_idx_mutex); - - data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); - data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); - - data = 0; - data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, - EXCP_EN, trap_mask); - data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, - REPLACE, trap_override); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data); - - data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); - data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); - - mutex_unlock(&adev->grbm_idx_mutex); - - return 0; -} - -uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev, - uint8_t wave_launch_mode, - uint32_t vmid) -{ - uint32_t data = 0; - bool is_stall_mode; - bool is_mode_set; - - is_stall_mode = (wave_launch_mode == 4); - is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4); - - mutex_lock(&adev->grbm_idx_mutex); - - data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, - VMID_MASK, is_mode_set ? 1 << vmid : 0); - data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, - MODE, is_mode_set ? wave_launch_mode : 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data); - - data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); - data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, - STALL_VMID, is_stall_mode ? 1 << vmid : 0); - data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, - STALL_RA, is_stall_mode ? 
1 : 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
-
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-
-/* kgd_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
- * The values read are:
- * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads.
- * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
- * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads.
- * gws_wait_time -- Wait Count for Global Wave Syncs.
- * que_sleep_wait_time -- Wait Count for Dequeue Retry.
- * sch_wave_wait_time -- Wait Count for Scheduling Wave Message.
- * sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
- * deq_retry_wait_time -- Wait Count for Global Wave Syncs.
- */
-void get_iq_wait_times_v10_3(struct amdgpu_device *adev,
- uint32_t *wait_times, uint32_t inst)
-
-{
- *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
-}
-
-void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev,
- uint32_t wait_times,
- uint32_t grace_period,
- uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst)
-{
- *reg_data = wait_times;
-
- *reg_data = REG_SET_FIELD(*reg_data,
- CP_IQ_WAIT_TIME2,
- SCH_WAVE,
- grace_period);
-
- *reg_offset = mmCP_IQ_WAIT_TIME2;
-}
-#endif
-
 const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
 .program_sh_mem_settings = program_sh_mem_settings_v10_3,
 .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3,
@@ -808,12 +672,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
 .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3,
 .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
 .program_trap_handler_settings = program_trap_handler_settings_v10_3,
-#if 0
- .enable_debug_trap = enable_debug_trap_v10_3,
- .disable_debug_trap = disable_debug_trap_v10_3,
- .set_wave_launch_trap_override = set_wave_launch_trap_override_v10_3,
- .set_wave_launch_mode = set_wave_launch_mode_v10_3,
- .get_iq_wait_times = get_iq_wait_times_v10_3,
- .build_grace_period_packet_info = build_grace_period_packet_info_v10_3,
-#endif
+ .enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v10_disable_debug_trap
};

From be6f94039e1a91df4b0efc2da9167ca9c17bb532 Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Fri, 1 Apr 2022 13:31:57 -0400
Subject: [PATCH 752/861] drm/amdgpu: add gfx9.4.2 hw debug mode enable and disable calls

GFX9.4.2 now supports per-VMID debug mode control registers (SPI_GDBG_PER_VMID_CNTL). Because the KFD lets the HWS handle PASID-VMID mapping, the KFD will forward all debug mode setting register writes to the HWS scheduler using a new MAP_PROCESS API, so instead of writing to registers, return the required register values that the HWS needs to write on debug enable and disable.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 .../drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 42 ++++++++++++++++++-
 1 file changed, 41 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index 4485bb29bec9..a6f98141c29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -23,6 +23,44 @@
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_amdkfd_arcturus.h"
 #include "amdgpu_amdkfd_gfx_v9.h"
+#include "gc/gc_9_4_2_offset.h"
+#include "gc/gc_9_4_2_sh_mask.h"
+
+/*
+ * Returns TRAP_EN, EXCP_EN and EXCP_REPLACE.
+ * + * restore_dbg_registers is ignored here but is a general interface requirement + * for devices that support GFXOFF and where the RLC save/restore list + * does not support hw registers for debugging i.e. the driver has to manually + * initialize the debug mode registers after it has disabled GFX off during the + * debug session. + */ +static uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid) +{ + uint32_t data = 0; + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0); + + return data; +} + +/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */ +static uint32_t kgd_aldebaran_disable_debug_trap(struct amdgpu_device *adev, + bool keep_trap_enabled, + uint32_t vmid) +{ + uint32_t data = 0; + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, keep_trap_enabled); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0); + + return data; +} const struct kfd2kgd_calls aldebaran_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, @@ -42,5 +80,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, - .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings + .enable_debug_trap = kgd_aldebaran_enable_debug_trap, + .disable_debug_trap = kgd_aldebaran_disable_debug_trap, + .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, }; From 33f3437ae1194ef5dedbf275dcf74ed9c114647d Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 26 Aug 2022 22:35:50 -0400 Subject: [PATCH 753/861] drm/amdgpu: add gfx11 hw debug mode enable and disable calls Implement the per-device calls to enable or disable HW debug mode for GFX11. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 7deff8a547fb..cc954cf248ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -607,6 +607,42 @@ static void set_vm_context_page_table_base_v11(struct amdgpu_device *adev, adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); } +/* + * Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. + * + * restore_dbg_registers is ignored here but is a general interface requirement + * for devices that support GFXOFF and where the RLC save/restore list + * does not support hw registers for debugging i.e. the driver has to manually + * initialize the debug mode registers after it has disabled GFX off during the + * debug session. + */ +static uint32_t kgd_gfx_v11_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid) +{ + uint32_t data = 0; + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0); + + return data; +} + +/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. 
*/
+static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, keep_trap_enabled);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
 const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
 .program_sh_mem_settings = program_sh_mem_settings_v11,
 .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -623,4 +659,6 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
 .wave_control_execute = wave_control_execute_v11,
 .get_atc_vmid_pasid_mapping_info = NULL,
 .set_vm_context_page_table_base = set_vm_context_page_table_base_v11,
+ .enable_debug_trap = kgd_gfx_v11_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v11_disable_debug_trap
};

From 7cee6a6824a0429a6255abe91b5af01b9a01cd03 Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Thu, 23 Mar 2023 17:17:20 -0400
Subject: [PATCH 754/861] drm/amdgpu: add configurable grace period for unmap queues

The HWS scheduler allows a grace period for wave completion prior to preemption for better performance by avoiding CWSR on waves that can potentially complete quickly. The debugger, on the other hand, will want to inspect wave status immediately after it actively triggers preemption (a suspend function to be provided).

To minimize latency between preemption and debugger wave inspection, allow immediate preemption by setting the grace period to 0.

Note that setting the preemption grace period to 0 will result in an infinite grace period being set due to a CP FW bug, so set it to 1 for now.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 .../drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 2 +
 .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2 +
 .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 43 ++++++++++++
 .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 6 ++
 .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 2 +
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 43 ++++++++++++
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 8 ++-
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 63 +++++++++++++----
 .../drm/amd/amdkfd/kfd_device_queue_manager.h | 3 +
 .../gpu/drm/amd/amdkfd/kfd_packet_manager.c | 32 +++++++++
 .../drm/amd/amdkfd/kfd_packet_manager_v9.c | 39 +++++++++++
 .../drm/amd/amdkfd/kfd_packet_manager_vi.c | 2 +
 .../gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 65 +++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 ++
 14 files changed, 295 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index a6f98141c29c..b811a0985050 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -82,5 +82,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
 .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
 .enable_debug_trap = kgd_aldebaran_enable_debug_trap,
 .disable_debug_trap = kgd_aldebaran_disable_debug_trap,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
 .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index d2918e5c0dea..a62bd0068515 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -410,6 +410,8 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
 .enable_debug_trap = kgd_arcturus_enable_debug_trap,
 .disable_debug_trap = kgd_arcturus_disable_debug_trap,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
 .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
 .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 240f5006e278..98006c7021dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -803,6 +803,47 @@ uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev,
 return 0;
 }
+/* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
+ * The values read are:
+ * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads.
+ * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
+ * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads.
+ * gws_wait_time -- Wait Count for Global Wave Syncs.
+ * que_sleep_wait_time -- Wait Count for Dequeue Retry.
+ * sch_wave_wait_time -- Wait Count for Scheduling Wave Message.
+ * sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
+ * deq_retry_wait_time -- Wait Count for Global Wave Syncs.
+ */
+void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times)
+
+{
+ *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
+}
+
+void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t grace_period,
+ uint32_t *reg_offset,
+ uint32_t *reg_data)
+{
+ *reg_data = wait_times;
+
+ /*
+ * The CP cannot handle a 0 grace period input and will result in
+ * an infinite grace period being set, so set it to 1 to prevent this.
+ */ + if (grace_period == 0) + grace_period = 1; + + *reg_data = REG_SET_FIELD(*reg_data, + CP_IQ_WAIT_TIME2, + SCH_WAVE, + grace_period); + + *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2); +} + static void program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst) @@ -848,5 +889,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .set_vm_context_page_table_base = set_vm_context_page_table_base, .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 251d61fbde07..1e993a213646 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -26,3 +26,9 @@ uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev, uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid); +void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 8b293f3dcbd2..387bdf4823c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -672,6 +672,8 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3, .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3, .program_trap_handler_settings = program_trap_handler_settings_v10_3, + .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, .disable_debug_trap = kgd_gfx_v10_disable_debug_trap }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 8d7d04704b00..829ee720cc44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -739,6 +739,24 @@ uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, return 0; } +/* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values + * The values read are: + * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. + * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads. + * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads. + * gws_wait_time -- Wait Count for Global Wave Syncs. + * que_sleep_wait_time -- Wait Count for Dequeue Retry. + * sch_wave_wait_time -- Wait Count for Scheduling Wave Message. + * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. + * deq_retry_wait_time -- Wait Count for Global Wave Syncs. 
+ */
+void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times)
+
+{
+ *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
+}
+
 void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
 uint32_t vmid, uint64_t page_table_base)
 {
@@ -926,6 +944,29 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
 adev->gfx.cu_info.max_waves_per_simd;
 }
+void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t grace_period,
+ uint32_t *reg_offset,
+ uint32_t *reg_data)
+{
+ *reg_data = wait_times;
+
+ /*
+ * The CP cannot handle a 0 grace period input and will result in
+ * an infinite grace period being set, so set it to 1 to prevent this.
+ */
+ if (grace_period == 0)
+ grace_period = 1;
+
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ grace_period);
+
+ *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
+}
+
 void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
 uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst)
 {
@@ -969,6 +1010,8 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
 .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
 .enable_debug_trap = kgd_gfx_v9_enable_debug_trap,
 .disable_debug_trap = kgd_gfx_v9_disable_debug_trap,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
 .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
 .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 9588ff055393..fed5b7f18b1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -20,8 +20,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
*/ - - void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, @@ -73,3 +71,9 @@ uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev, uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid); +void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2baa0781eafc..0b88a64e61fe 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -46,10 +46,13 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, static int execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param); + uint32_t filter_param, + uint32_t grace_period); static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset); + uint32_t filter_param, + uint32_t grace_period, + bool reset); static int map_queues_cpsch(struct device_queue_manager *dqm); @@ -866,7 +869,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { if (!dqm->dev->kfd->shared_resources.enable_mes) retval = unmap_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else if (prev_active) retval = remove_queue_mes(dqm, q, &pdd->qpd); @@ -1042,7 +1045,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, retval = execute_queues_cpsch(dqm, qpd->is_debug ? 
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); out: dqm_unlock(dqm); @@ -1182,8 +1186,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, } if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); - + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp; atomic64_add(eviction_duration, &pdd->evict_duration_counter); vm_not_acquired: @@ -1525,6 +1528,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm) init_sdma_bitmaps(dqm); + if (dqm->dev->kfd2kgd->get_iq_wait_times) + dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev, + &dqm->wait_times); return 0; } @@ -1563,8 +1569,9 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm->is_hws_hang = false; dqm->is_resetting = false; dqm->sched_running = true; + if (!dqm->dev->kfd->shared_resources.enable_mes) - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return 0; @@ -1589,7 +1596,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) if (!dqm->is_hws_hang) { if (!dqm->dev->kfd->shared_resources.enable_mes) - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false); + unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else remove_all_queues_mes(dqm); } @@ -1631,7 +1638,8 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, list_add(&kq->list, &qpd->priv_queue_list); increment_queue_count(dqm, qpd, kq->queue); qpd->is_debug = true; - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return 0; @@ -1645,7 +1653,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, list_del(&kq->list); decrement_queue_count(dqm, qpd, kq->queue); qpd->is_debug = false; - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); /* * Unconditionally decrement this counter, regardless of the queue's * type. 
@@ -1722,7 +1731,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); else retval = add_queue_mes(dqm, q, qpd); if (retval) @@ -1811,7 +1820,9 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) /* dqm->lock mutex has to be locked before calling this function */ static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset) + uint32_t filter_param, + uint32_t grace_period, + bool reset) { int retval = 0; struct mqd_manager *mqd_mgr; @@ -1823,6 +1834,12 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->active_runlist) return retval; + if (grace_period != USE_DEFAULT_GRACE_PERIOD) { + retval = pm_update_grace_period(&dqm->packet_mgr, grace_period); + if (retval) + return retval; + } + retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset); if (retval) return retval; @@ -1855,6 +1872,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, return -ETIME; } + /* We need to reset the grace period value for this device */ + if (grace_period != USE_DEFAULT_GRACE_PERIOD) { + if (pm_update_grace_period(&dqm->packet_mgr, + USE_DEFAULT_GRACE_PERIOD)) + pr_err("Failed to reset grace period\n"); + } + pm_release_ib(&dqm->packet_mgr); dqm->active_runlist = false; @@ -1870,7 +1894,7 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm, dqm_lock(dqm); retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID, - pasid, true); + pasid, USE_DEFAULT_GRACE_PERIOD, true); dqm_unlock(dqm); return retval; @@ -1879,13 +1903,14 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm, /* dqm->lock mutex has to be locked before calling this function */ static int execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param) + uint32_t filter_param, + uint32_t grace_period) { int retval; if (dqm->is_hws_hang) return -EIO; - retval = unmap_queues_cpsch(dqm, filter, filter_param, false); + retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false); if (retval) return retval; @@ -1943,7 +1968,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, if (!dqm->dev->kfd->shared_resources.enable_mes) { decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); if (retval == -ETIME) qpd->reset_wavefronts = true; } else { @@ -2228,7 +2254,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, } if (!dqm->dev->kfd->shared_resources.enable_mes) - retval = execute_queues_cpsch(dqm, filter, 0); + retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD); if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev); @@ -2589,7 +2615,8 @@ int dqm_debugfs_hang_hws(struct device_queue_manager *dqm) return r; } dqm->active_runlist = true; - r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, + 0, USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index cd4383bb207f..d4dd3b4acbf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -37,6 +37,7 @@ #define KFD_MES_PROCESS_QUANTUM 100000 #define KFD_MES_GANG_QUANTUM 10000 +#define USE_DEFAULT_GRACE_PERIOD 0xffffffff struct device_process_node { struct qcm_process_device *qpd; @@ -259,6 +260,8 @@ struct device_queue_manager { /* used for GFX 9.4.3 only */ uint32_t current_logical_xcc_start; + + uint32_t wait_times; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 2f54172e9175..401096c103b2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -370,6 +370,38 @@ out: return retval; } +int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period) +{ + int retval = 0; + uint32_t *buffer, size; + + size = pm->pmf->set_grace_period_size; + + mutex_lock(&pm->lock); + + if (size) { + kq_acquire_packet_buffer(pm->priv_queue, + size / sizeof(uint32_t), + (unsigned int **)&buffer); + + if (!buffer) { + pr_err("Failed to allocate buffer on kernel queue\n"); + retval = -ENOMEM; + goto out; + } + + retval = pm->pmf->set_grace_period(pm, buffer, grace_period); + if (!retval) + kq_submit_packet(pm->priv_queue); + else + kq_rollback_packet(pm->priv_queue); + } + +out: + mutex_unlock(&pm->lock); + return retval; +} + int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_unmap_queues_filter filter, uint32_t filter_param, bool reset) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 44cf3a5f6fdb..1fda6dcf84b1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -262,6 +262,41 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, return 0; } +static int pm_set_grace_period_v9(struct packet_manager *pm, + uint32_t *buffer, + uint32_t grace_period) +{ + struct pm4_mec_write_data_mmio *packet; + uint32_t reg_offset = 0; + uint32_t reg_data = 0; + + pm->dqm->dev->kfd2kgd->build_grace_period_packet_info( + pm->dqm->dev->adev, + pm->dqm->wait_times, + grace_period, + ®_offset, + ®_data); + + if (grace_period == USE_DEFAULT_GRACE_PERIOD) + reg_data = pm->dqm->wait_times; + + packet = (struct pm4_mec_write_data_mmio *)buffer; + memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio)); + + packet->header.u32All = pm_build_pm4_header(IT_WRITE_DATA, + sizeof(struct pm4_mec_write_data_mmio)); + + packet->bitfields2.dst_sel = dst_sel___write_data__mem_mapped_register; + packet->bitfields2.addr_incr = + addr_incr___write_data__do_not_increment_address; + + packet->bitfields3.dst_mmreg_addr = reg_offset; + + packet->data = reg_data; + + return 0; +} + static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, enum kfd_unmap_queues_filter filter, uint32_t filter_param, bool reset) @@ -345,6 +380,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, + .set_grace_period = pm_set_grace_period_v9, .query_status = pm_query_status_v9, .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process), @@ -352,6 +388,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .set_resources_size = sizeof(struct 
pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio), .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = 0, }; @@ -362,6 +399,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = { .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, + .set_grace_period = pm_set_grace_period_v9, .query_status = pm_query_status_v9, .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process_aldebaran), @@ -369,6 +407,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio), .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = 0, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index faf4772ed317..c1199d06d131 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -303,6 +303,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = { .set_resources = pm_set_resources_vi, .map_queues = pm_map_queues_vi, .unmap_queues = pm_unmap_queues_vi, + .set_grace_period = NULL, .query_status = pm_query_status_vi, .release_mem = pm_release_mem_vi, .map_process_size = sizeof(struct pm4_mes_map_process), @@ -310,6 +311,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = 0, .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = sizeof(struct pm4_mec_release_mem) }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index 2ad708c64012..206f1960857f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -584,6 +584,71 @@ struct pm4_mec_release_mem { #endif +#ifndef PM4_MEC_WRITE_DATA_DEFINED +#define PM4_MEC_WRITE_DATA_DEFINED + +enum WRITE_DATA_dst_sel_enum { + dst_sel___write_data__mem_mapped_register = 0, + dst_sel___write_data__tc_l2 = 2, + dst_sel___write_data__gds = 3, + dst_sel___write_data__memory = 5, + dst_sel___write_data__memory_mapped_adc_persistent_state = 6, +}; + +enum WRITE_DATA_addr_incr_enum { + addr_incr___write_data__increment_address = 0, + addr_incr___write_data__do_not_increment_address = 1 +}; + +enum WRITE_DATA_wr_confirm_enum { + wr_confirm___write_data__do_not_wait_for_write_confirmation = 0, + wr_confirm___write_data__wait_for_write_confirmation = 1 +}; + +enum WRITE_DATA_cache_policy_enum { + cache_policy___write_data__lru = 0, + cache_policy___write_data__stream = 1 +}; + + +struct pm4_mec_write_data_mmio { + union { + union PM4_MES_TYPE_3_HEADER header; /*header */ + unsigned int ordinal1; + }; + + union { + struct { + unsigned int reserved1:8; + unsigned int dst_sel:4; + unsigned int reserved2:4; + unsigned int addr_incr:1; + unsigned int reserved3:2; + unsigned int resume_vf:1; + unsigned int wr_confirm:1; + unsigned int reserved4:4; + unsigned int cache_policy:2; + unsigned int reserved5:5; + 
} bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int dst_mmreg_addr:18;
+ unsigned int reserved6:14;
+ } bitfields3;
+ unsigned int ordinal3;
+ };
+
+ uint32_t reserved7;
+
+ uint32_t data;
+
+};
+
+#endif
+
 enum {
 CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 1b272f879b4c..4c912b7735b5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1350,6 +1350,8 @@ struct packet_manager_funcs {
 int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
 enum kfd_unmap_queues_filter mode,
 uint32_t filter_param, bool reset);
+ int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
+ uint32_t grace_period);
 int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
 uint64_t fence_address, uint64_t fence_value);
 int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
@@ -1360,6 +1362,7 @@ struct packet_manager_funcs {
 int set_resources_size;
 int map_queues_size;
 int unmap_queues_size;
+ int set_grace_period_size;
 int query_status_size;
 int release_mem_size;
};
@@ -1382,6 +1385,8 @@ int pm_send_unmap_queue(struct packet_manager *pm,
 void pm_release_ib(struct packet_manager *pm);
+int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
+
 /* Following PM funcs can be shared among VI and AI */
 unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

From 97ae3c8cce96f3bebf883d0812cef5d3fdbe3e64 Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Mon, 4 Apr 2022 12:27:43 -0400
Subject: [PATCH 755/861] drm/amdkfd: prepare map process for single process debug devices

Older HW only supports debugging on a single process because the SPI debug mode setting registers are device global. The HWS has supplied a single pinned VMID (0xf) for MAP_PROCESS for debug purposes. To pin the VMID, the KFD will remove the VMID from the HWS dynamic VMID allocation via SET_RESOURCES so that a debugged process will never migrate away from its pinned VMID.

The KFD is responsible for reserving and releasing this pinned VMID accordingly whenever the debugger attaches and detaches respectively.
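A minimal sketch of the pinning arithmetic (illustrative only; the full reserve_debug_trap_vmid()/release_debug_trap_vmid() paths in the diff below additionally unmap all queues, refresh SET_RESOURCES via set_sched_resources(), and remap):

	/* Reserve: hide the HWS-pinned VMID (vm_info.last_vmid_kfd, i.e. 0xf)
	 * from dynamic VMID allocation so a debugged process can never
	 * migrate away from it.
	 */
	updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
	updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);

	/* Release on debugger detach: return the VMID to the dynamic pool. */
	updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);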
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../drm/amd/amdkfd/kfd_device_queue_manager.c | 93 +++++++++++++++++++ .../drm/amd/amdkfd/kfd_device_queue_manager.h | 5 + .../drm/amd/amdkfd/kfd_packet_manager_v9.c | 9 ++ .../gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 5 +- 4 files changed, 111 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0b88a64e61fe..cfe5bd59070e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1525,6 +1525,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm) dqm->gws_queue_count = 0; dqm->active_runlist = false; INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception); + dqm->trap_debug_vmid = 0; init_sdma_bitmaps(dqm); @@ -2501,6 +2502,98 @@ static void kfd_process_hw_exception(struct work_struct *work) amdgpu_amdkfd_gpu_reset(dqm->dev->adev); } +int reserve_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + int r; + int updated_vmid_mask; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + dqm_lock(dqm); + + if (dqm->trap_debug_vmid != 0) { + pr_err("Trap debug id already reserved\n"); + r = -EBUSY; + goto out_unlock; + } + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD, false); + if (r) + goto out_unlock; + + updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd); + + dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; + dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd; + r = set_sched_resources(dqm); + if (r) + goto out_unlock; + + r = map_queues_cpsch(dqm); + if (r) + goto out_unlock; + + pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid); + +out_unlock: + dqm_unlock(dqm); + return r; +} + +/* + * Releases vmid for the trap debugger + */ +int release_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + int r; + int updated_vmid_mask; + uint32_t trap_debug_vmid; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + dqm_lock(dqm); + trap_debug_vmid = dqm->trap_debug_vmid; + if (dqm->trap_debug_vmid == 0) { + pr_err("Trap debug id is not reserved\n"); + r = -EINVAL; + goto out_unlock; + } + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD, false); + if (r) + goto out_unlock; + + updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd); + + dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; + dqm->trap_debug_vmid = 0; + r = set_sched_resources(dqm); + if (r) + goto out_unlock; + + r = map_queues_cpsch(dqm); + if (r) + goto out_unlock; + + pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid); + +out_unlock: + dqm_unlock(dqm); + return r; +} + #if defined(CONFIG_DEBUG_FS) static void seq_reg_dump(struct seq_file *m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index d4dd3b4acbf0..bf7aa3f84182 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -250,6 +250,7 @@ struct device_queue_manager { struct kfd_mem_obj *fence_mem; bool active_runlist; int sched_policy; + uint32_t trap_debug_vmid; /* hw exception */ bool is_hws_hang; @@ -285,6 +286,10 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm); unsigned int get_pipes_per_mec(struct device_queue_manager *dqm); unsigned int get_num_sdma_queues(struct device_queue_manager *dqm); unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm); +int reserve_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); +int release_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 1fda6dcf84b1..0fe73dbd28af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -34,6 +34,9 @@ static int pm_map_process_v9(struct packet_manager *pm, { struct pm4_mes_map_process *packet; uint64_t vm_page_table_base_addr = qpd->page_table_base; + struct kfd_node *kfd = pm->dqm->dev; + struct kfd_process_device *pdd = + container_of(qpd, struct kfd_process_device, qpd); packet = (struct pm4_mes_map_process *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process)); @@ -49,6 +52,12 @@ static int pm_map_process_v9(struct packet_manager *pm, packet->bitfields14.sdma_enable = 1; packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled && + pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) { + packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid; + packet->bitfields2.new_debug = 1; + } + packet->sh_mem_config = qpd->sh_mem_config; packet->sh_mem_bases = qpd->sh_mem_bases; if (qpd->tba_addr) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index 206f1960857f..8b6b2bd5c148 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -146,7 +146,10 @@ struct pm4_mes_map_process { union { struct { uint32_t pasid:16; - uint32_t reserved1:8; + uint32_t reserved1:2; + uint32_t debug_vmid:4; + uint32_t new_debug:1; + uint32_t reserved2:1; uint32_t diq_enable:1; uint32_t process_quantum:7; } bitfields2; From 0de4ec9a03537bd2b189b5afbf83acd6b72b0258 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 4 Apr 2022 13:38:11 -0400 Subject: [PATCH 756/861] drm/amdgpu: prepare map process for multi-process debug devices Unlike single process debug devices, multi-process debug devices allow debug mode setting per-VMID (non-device-global). Because the HWS manages PASID-VMID mapping, the new MAP_PROCESS API allows the KFD to forward the required SPI debug register write requests. To request a new debug mode setting change, the KFD must be able to preempt all queues then remap all queues with these new setting requests for MAP_PROCESS to take effect. Note that by default, trap enablement in non-debug mode must be disabled for performance reasons for multi-process debug devices due to setup overhead in FW. 
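A minimal sketch of the preempt-then-remap sequence (illustrative only; it restates debug_refresh_runlist() from the diff below):

	/* Apply a per-VMID debug setting change: unmap every queue with an
	 * immediate (zero) grace period, then remap so the HWS re-issues
	 * MAP_PROCESS with the updated SPI_GDBG_PER_VMID_CNTL value.
	 */
	r = debug_lock_and_unmap(dqm);	/* dqm_lock() + unmap, grace period 0 */
	if (r)
		return r;
	return debug_map_and_unlock(dqm);	/* map_queues_cpsch() + dqm_unlock() */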
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 5 ++ .../drm/amd/amdkfd/kfd_device_queue_manager.c | 51 +++++++++++++++++++ .../drm/amd/amdkfd/kfd_device_queue_manager.h | 3 ++ .../drm/amd/amdkfd/kfd_packet_manager_v9.c | 14 +++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 ++++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 5 ++ 6 files changed, 87 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index a8abfe2a0a14..db6d72e7930f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -29,4 +29,9 @@ int kfd_dbg_trap_disable(struct kfd_process *target); int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, void __user *runtime_info, uint32_t *runtime_info_size); +static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) +{ + return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2); +} + #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index cfe5bd59070e..495c9238254e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -36,6 +36,7 @@ #include "kfd_kernel_queue.h" #include "amdgpu_amdkfd.h" #include "mes_api_def.h" +#include "kfd_debug.h" /* Size of the per-pipe EOP queue */ #define CIK_HPD_EOP_BYTES_LOG2 11 @@ -2594,6 +2595,56 @@ out_unlock: return r; } +int debug_lock_and_unmap(struct device_queue_manager *dqm) +{ + int r; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) + return 0; + + dqm_lock(dqm); + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false); + if (r) + dqm_unlock(dqm); + + return r; +} + +int debug_map_and_unlock(struct device_queue_manager *dqm) +{ + int r; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) + return 0; + + r = map_queues_cpsch(dqm); + + dqm_unlock(dqm); + + return r; +} + +int debug_refresh_runlist(struct device_queue_manager *dqm) +{ + int r = debug_lock_and_unmap(dqm); + + if (r) + return r; + + return debug_map_and_unlock(dqm); +} + #if defined(CONFIG_DEBUG_FS) static void seq_reg_dump(struct seq_file *m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index bf7aa3f84182..bb75d93712eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -290,6 +290,9 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); int release_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); +int debug_lock_and_unmap(struct device_queue_manager *dqm); +int debug_map_and_unlock(struct device_queue_manager *dqm); +int debug_refresh_runlist(struct device_queue_manager *dqm); static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 0fe73dbd28af..29a2d0499b67 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -88,6 +88,10 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, { struct pm4_mes_map_process_aldebaran *packet; uint64_t vm_page_table_base_addr = qpd->page_table_base; + struct kfd_dev *kfd = pm->dqm->dev->kfd; + struct kfd_process_device *pdd = + container_of(qpd, struct kfd_process_device, qpd); + int i; packet = (struct pm4_mes_map_process_aldebaran *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran)); @@ -102,6 +106,16 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, packet->bitfields14.num_oac = qpd->num_oac; packet->bitfields14.sdma_enable = 1; packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override | + pdd->spi_dbg_launch_mode; + + if (pdd->process->debug_trap_enabled) { + for (i = 0; i < kfd->device_info.num_of_watch_points; i++) + packet->tcp_watch_cntl[i] = pdd->watch_points[i]; + + packet->bitfields2.single_memops = + !!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP); + } packet->sh_mem_config = qpd->sh_mem_config; packet->sh_mem_bases = qpd->sh_mem_bases; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4c912b7735b5..8fca7175daab 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -816,6 +816,12 @@ struct kfd_process_device { uint64_t faults; uint64_t page_in; uint64_t page_out; + + /* Tracks debug per-vmid request settings */ + uint32_t spi_dbg_override; + uint32_t spi_dbg_launch_mode; + uint32_t watch_points[4]; + /* * If this process has been checkpointed before, then the user * application will use the original gpu_id on the @@ -952,6 +958,9 @@ struct kfd_process { bool xnack_enabled; + /* Tracks debug per-vmid request for debug flags */ + bool dbg_flags; + atomic_t poison; /* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */ bool queues_paused; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d75dac92775c..725d936b2cc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1612,6 +1612,11 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, } p->pdds[p->n_pdds++] = pdd; + if (kfd_dbg_is_per_vmid_supported(pdd->dev)) + pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap( + pdd->dev->adev, + false, + 0); /* Init idr used for memory handle translation */ idr_init(&pdd->alloc_idr); From a9818854ea7870ec5464d37b72c89f5fc198708e Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 26 Aug 2022 22:04:15 -0400 Subject: [PATCH 757/861] drm/amdgpu: expose debug api for mes Similar to the F32 HWS, the RS64 HWS for GFX11 now supports a multi-process debug API. The skip_process_ctx_clear ADD_QUEUE requirement is to prevent the MES from clearing the process context when the first queue is added to the scheduler in order to maintain debug mode settings during queue preemption and restore. The MES clears the process context in this case due to an unresolved FW caching bug during normal mode operations. During debug mode, the KFD will hold a reference to the target process so the process context should never go stale and MES can afford to skip this requirement. 
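For reference, a minimal caller sketch of the new helper (values are
illustrative; the KFD's kfd_dbg_set_mes_debug_mode() later in this series is
the real user):

/* Sketch: push a process's shader debug state to the MES scheduler.
 * proc_ctx_gpu_addr identifies the process context to the MES; flags
 * carries the SQ_DEBUG single_memop/single_alu_op bits.
 */
uint32_t tcp_watch_cntl[4] = {0};	/* TCP_WATCHx_CNTL values */
uint32_t flags = 0;
int r;

r = amdgpu_mes_set_shader_debugger(adev, proc_ctx_gpu_addr,
				   spi_gdbg_per_vmid_cntl,
				   tcp_watch_cntl, flags);
if (r)
	DRM_ERROR("failed to set shader debugger (%d)\n", r);

The helper serializes against other MES operations with amdgpu_mes_lock(), so
callers need no extra locking of their own around it.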
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 32 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 20 ++++++++++++ drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 12 +++++++ drivers/gpu/drm/amd/include/mes_v11_api_def.h | 21 +++++++++++- 4 files changed, 84 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 49bb6c03d606..20cc3fffe921 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -924,6 +924,38 @@ error: return r; } +int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, + uint64_t process_context_addr, + uint32_t spi_gdbg_per_vmid_cntl, + const uint32_t *tcp_watch_cntl, + uint32_t flags) +{ + struct mes_misc_op_input op_input = {0}; + int r; + + if (!adev->mes.funcs->misc_op) { + DRM_ERROR("mes set shader debugger is not supported!\n"); + return -EINVAL; + } + + op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER; + op_input.set_shader_debugger.process_context_addr = process_context_addr; + op_input.set_shader_debugger.flags.u32all = flags; + op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl; + memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl, + sizeof(op_input.set_shader_debugger.tcp_watch_cntl)); + + amdgpu_mes_lock(&adev->mes); + + r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + if (r) + DRM_ERROR("failed to set_shader_debugger\n"); + + amdgpu_mes_unlock(&adev->mes); + + return r; +} + static void amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev, struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 547ec35691fa..d20df0cf0d88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -256,6 +256,7 @@ enum mes_misc_opcode { MES_MISC_OP_READ_REG, MES_MISC_OP_WRM_REG_WAIT, MES_MISC_OP_WRM_REG_WR_WAIT, + MES_MISC_OP_SET_SHADER_DEBUGGER, }; struct mes_misc_op_input { @@ -278,6 +279,20 @@ struct mes_misc_op_input { uint32_t reg0; uint32_t reg1; } wrm_reg; + + struct { + uint64_t process_context_addr; + union { + struct { + uint64_t single_memop : 1; + uint64_t single_alu_op : 1; + uint64_t reserved: 30; + }; + uint32_t u32all; + } flags; + uint32_t spi_gdbg_per_vmid_cntl; + uint32_t tcp_watch_cntl[4]; + } set_shader_debugger; }; }; @@ -340,6 +355,11 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg, int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); +int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, + uint64_t process_context_addr, + uint32_t spi_gdbg_per_vmid_cntl, + const uint32_t *tcp_watch_cntl, + uint32_t flags); int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, int queue_type, int idx, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 90b4a74ccf01..861910a6662d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -339,6 +339,18 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes, misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0; misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1; break; + case MES_MISC_OP_SET_SHADER_DEBUGGER: + misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER; + misc_pkt.set_shader_debugger.process_context_addr = + 
input->set_shader_debugger.process_context_addr;
+		misc_pkt.set_shader_debugger.flags.u32all =
+				input->set_shader_debugger.flags.u32all;
+		misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl =
+				input->set_shader_debugger.spi_gdbg_per_vmid_cntl;
+		memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl,
+				input->set_shader_debugger.tcp_watch_cntl,
+				sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
+		break;
 	default:
 		DRM_ERROR("unsupported misc op (%d) \n", input->op);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index dc694cb246d9..f3c15f18ddb5 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -274,7 +274,8 @@ union MESAPI__ADD_QUEUE {
 			uint32_t is_kfd_process	: 1;
 			uint32_t trap_en	: 1;
 			uint32_t is_aql_queue	: 1;
-			uint32_t reserved	: 20;
+			uint32_t skip_process_ctx_clear : 1;
+			uint32_t reserved	: 19;
 		};
 		struct MES_API_STATUS api_status;
 		uint64_t tma_addr;
@@ -523,6 +524,7 @@ enum MESAPI_MISC_OPCODE {
 	MESAPI_MISC__QUERY_STATUS,
 	MESAPI_MISC__READ_REG,
 	MESAPI_MISC__WAIT_REG_MEM,
+	MESAPI_MISC__SET_SHADER_DEBUGGER,
 	MESAPI_MISC__MAX,
 };
@@ -561,6 +563,20 @@ struct QUERY_STATUS {
 	uint32_t context_id;
 };
+struct SET_SHADER_DEBUGGER {
+	uint64_t process_context_addr;
+	union {
+		struct {
+			uint32_t single_memop : 1;  /* SQ_DEBUG.single_memop */
+			uint32_t single_alu_op : 1; /* SQ_DEBUG.single_alu_op */
+			uint32_t reserved : 30;
+		};
+		uint32_t u32all;
+	} flags;
+	uint32_t spi_gdbg_per_vmid_cntl;
+	uint32_t tcp_watch_cntl[4]; /* TCP_WATCHx_CNTL */
+};
+
 union MESAPI__MISC {
 	struct {
 		union MES_API_HEADER header;
@@ -573,6 +589,9 @@ union MESAPI__MISC {
 		struct QUERY_STATUS query_status;
 		struct READ_REG read_reg;
 		struct WAIT_REG_MEM wait_reg_mem;
+		struct SET_SHADER_DEBUGGER set_shader_debugger;
+
 		enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
+
 		uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
 	};
 };

From 218895820e6fccade42a7c3ab9c0a44dec0a1ebc Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Tue, 5 Apr 2022 12:34:55 -0400
Subject: [PATCH 758/861] drm/amdkfd: add per process hw trap enable and
 disable functions

To enable HW debug mode per process, all devices must be debug enabled
successfully. If a failure occurs, rewind the enablement of debug mode
on the devices that were already enabled.

A power management scenario that needs to be considered is HW debug
mode setting during GFXOFF. During GFXOFF, these registers will be
unreachable, so we have to transiently disable GFXOFF when setting.
Also, some devices don't support the RLC save restore function for
these debug registers, so we have to disable GFXOFF completely during
a debug session.

Cooperative launch also has debugging restrictions based on HW/FW bugs.
If such bugs exist, the debugger cannot attach to a process that uses
GWS resources, nor can GWS resources be requested if a process is being
debugged.

Multi-process debug devices can only enable trap temporaries based on
certain runtime scenarios, which will be explained when the runtime
enable functions are implemented in a follow up patch.
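In sketch form, the "all devices or none" rule is the usual unwind pattern;
kfd_dbg_trap_activate()/kfd_dbg_trap_deactivate() below implement the real
version, and the per-device helper here is hypothetical:

/* Sketch: enable debug mode on every device of the target process; on
 * failure, unwind only the i devices already enabled so that debug
 * state belonging to other sessions is left untouched.
 */
for (i = 0; i < target->n_pdds; i++) {
	r = sketch_enable_debug_on_device(target->pdds[i]);
	if (r)
		goto unwind;
}
return 0;

unwind:
	kfd_dbg_trap_deactivate(target, true /* unwind */, i /* unwind_count */);
	return r;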
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 148 ++++++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 29 +++++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10 ++ 4 files changed, 190 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index ee086a0a46df..826a99acb6fb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1488,6 +1488,11 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep, goto out_unlock; } + if (!kfd_dbg_has_gws_support(dev) && p->debug_trap_enabled) { + retval = -EBUSY; + goto out_unlock; + } + retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL); mutex_unlock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 898cc1fe3d13..73b07b5f17f1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -21,13 +21,78 @@ */ #include "kfd_debug.h" +#include "kfd_device_queue_manager.h" #include +static int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) +{ + uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; + uint32_t flags = pdd->process->dbg_flags; + + if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) + return 0; + + return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl, + pdd->watch_points, flags); +} + +/* kfd_dbg_trap_deactivate: + * target: target process + * unwind: If this is unwinding a failed kfd_dbg_trap_enable() + * unwind_count: + * If unwind == true, how far down the pdd list we need + * to unwind + * else: ignored + */ +static void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count) +{ + int i; + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + /* If this is an unwind, and we have unwound the required + * enable calls on the pdd list, we need to stop now + * otherwise we may mess up another debugger session. + */ + if (unwind && i == unwind_count) + break; + + /* GFX off is already disabled by debug activate if not RLC restore supported. */ + if (kfd_dbg_is_rlc_restore_supported(pdd->dev)) + amdgpu_gfx_off_ctrl(pdd->dev->adev, false); + pdd->spi_dbg_override = + pdd->dev->kfd2kgd->disable_debug_trap( + pdd->dev->adev, + target->runtime_info.ttmp_setup, + pdd->dev->vm_info.last_vmid_kfd); + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + + if (!kfd_dbg_is_per_vmid_supported(pdd->dev) && + release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd)) + pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + debug_refresh_runlist(pdd->dev->dqm); + else + kfd_dbg_set_mes_debug_mode(pdd); + } +} + int kfd_dbg_trap_disable(struct kfd_process *target) { if (!target->debug_trap_enabled) return 0; + /* + * Defer deactivation to runtime if runtime not enabled otherwise reset + * attached running target runtime state to enable for re-attach. 
+ */ + if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) + kfd_dbg_trap_deactivate(target, false, 0); + else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED) + target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED; + fput(target->dbg_ev_file); target->dbg_ev_file = NULL; @@ -42,16 +107,89 @@ int kfd_dbg_trap_disable(struct kfd_process *target) return 0; } +static int kfd_dbg_trap_activate(struct kfd_process *target) +{ + int i, r = 0; + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) { + r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd); + + if (r) { + target->runtime_info.runtime_state = (r == -EBUSY) ? + DEBUG_RUNTIME_STATE_ENABLED_BUSY : + DEBUG_RUNTIME_STATE_ENABLED_ERROR; + + goto unwind_err; + } + } + + /* Disable GFX OFF to prevent garbage read/writes to debug registers. + * If RLC restore of debug registers is not supported and runtime enable + * hasn't done so already on ttmp setup request, restore the trap config registers. + * + * If RLC restore of debug registers is not supported, keep gfx off disabled for + * the debug session. + */ + amdgpu_gfx_off_ctrl(pdd->dev->adev, false); + if (!(kfd_dbg_is_rlc_restore_supported(pdd->dev) || + target->runtime_info.ttmp_setup)) + pdd->dev->kfd2kgd->enable_debug_trap(pdd->dev->adev, true, + pdd->dev->vm_info.last_vmid_kfd); + + pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap( + pdd->dev->adev, + false, + pdd->dev->vm_info.last_vmid_kfd); + + if (kfd_dbg_is_rlc_restore_supported(pdd->dev)) + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + r = debug_refresh_runlist(pdd->dev->dqm); + else + r = kfd_dbg_set_mes_debug_mode(pdd); + + if (r) { + target->runtime_info.runtime_state = + DEBUG_RUNTIME_STATE_ENABLED_ERROR; + goto unwind_err; + } + } + + return 0; + +unwind_err: + /* Enabling debug failed, we need to disable on + * all GPUs so the enable is all or nothing. + */ + kfd_dbg_trap_deactivate(target, true, i); + return r; +} + int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, void __user *runtime_info, uint32_t *runtime_size) { struct file *f; uint32_t copy_size; - int r = 0; + int i, r = 0; if (target->debug_trap_enabled) return -EALREADY; + /* Enable pre-checks */ + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + if (!KFD_IS_SOC15(pdd->dev)) + return -ENODEV; + + if (!kfd_dbg_has_gws_support(pdd->dev) && pdd->qpd.num_gws) + return -EBUSY; + } + copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info)); f = fget(fd); @@ -62,6 +200,10 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, target->dbg_ev_file = f; + /* defer activation to runtime if not runtime enabled */ + if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) + kfd_dbg_trap_activate(target); + /* We already hold the process reference but hold another one for the * debug session. 
*/ @@ -71,8 +213,10 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, if (target->debugger_process) atomic_inc(&target->debugger_process->debugged_process_count); - if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) + if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) { + kfd_dbg_trap_deactivate(target, false, 0); r = -EFAULT; + } *runtime_size = sizeof(target->runtime_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index db6d72e7930f..17481f824647 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -34,4 +34,33 @@ static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2); } +/* + * If GFX off is enabled, chips that do not support RLC restore for the debug + * registers will disable GFX off temporarily for the entire debug session. + * See disable_on_trap_action_entry and enable_on_trap_action_exit for details. + */ +static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev) +{ + return !(KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 10) || + KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1)); +} + +static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev) +{ + if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1) + && dev->kfd->mec2_fw_version < 0x81b6) || + (KFD_GC_VERSION(dev) >= IP_VERSION(9, 1, 0) + && KFD_GC_VERSION(dev) <= IP_VERSION(9, 2, 2) + && dev->kfd->mec2_fw_version < 0x1b6) || + (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0) + && dev->kfd->mec2_fw_version < 0x1b6) || + (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) + && dev->kfd->mec2_fw_version < 0x30) || + (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0))) + return false; + + /* Assume debugging and cooperative launch supported otherwise. */ + return true; +} #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 725d936b2cc7..e77cadadb09b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1165,9 +1165,19 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn) static void kfd_process_notifier_release_internal(struct kfd_process *p) { + int i; + cancel_delayed_work_sync(&p->eviction_work); cancel_delayed_work_sync(&p->restore_work); + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + + /* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */ + if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup) + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + } + /* Indicate to other users that MM is no longer valid */ p->mm = NULL; kfd_dbg_trap_disable(p); From 69a8c3ae2dea84a6d571e4c1aad306f630f3ccfd Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 1 Sep 2022 11:27:15 -0400 Subject: [PATCH 759/861] drm/amdkfd: apply trap workaround for gfx11 Due to a HW bug, waves in only half the shader arrays can enter trap. When starting a debug session, relocate all waves to the first shader array of each shader engine and mask off the 2nd shader array as unavailable. When ending a debug session, re-enable the 2nd shader array per shader engine. User CU masking per queue cannot be guaranteed to remain functional if requested during debugging (e.g. 
a user CU mask requests only the 2nd shader array as an available resource,
leading to zero HW resources available), nor can the runtime be alerted of any
of these changes during execution. Make user CU masking and debugging mutually
exclusive with respect to availability.

If the debugger tries to attach to a process with a user CU masked queue,
return the runtime status as enabled but busy.

If the debugger tries to attach and fails to reallocate queue waves to the
first shader array of each shader engine, return the runtime status as
enabled but with an error.

In addition, like any other multi-process debug supported devices, disable
trap temporary setup per-process to avoid performance impact from setup
overhead.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 2 +
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 7 +--
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 -
 drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 57 +++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 3 +-
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 7 +++
 .../gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 3 +-
 .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 3 +-
 .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 42 ++++++++++----
 .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 3 +-
 .../gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 3 +-
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +-
 .../amd/amdkfd/kfd_process_queue_manager.c | 9 ++-
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 7 ++-
 14 files changed, 122 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index d20df0cf0d88..b5f5eed2b5ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -219,6 +219,8 @@ struct mes_add_queue_input {
 	uint32_t gws_size;
 	uint64_t tba_addr;
 	uint64_t tma_addr;
+	uint32_t trap_en;
+	uint32_t skip_process_ctx_clear;
 	uint32_t is_kfd_process;
 	uint32_t is_aql_queue;
 	uint32_t queue_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 861910a6662d..c4e3cb8d44de 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -202,17 +202,14 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
 	mes_add_queue_pkt.gws_size = input->gws_size;
 	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
 	mes_add_queue_pkt.tma_addr = input->tma_addr;
+	mes_add_queue_pkt.trap_en = input->trap_en;
+	mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear;
 	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
 	/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
 	mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
 	mes_add_queue_pkt.gds_size = input->queue_size;
-	if (!(((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 4) &&
-		  (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) &&
-		  (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(11, 0, 3))))
-		mes_add_queue_pkt.trap_en = 1;
-
 	/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
 	mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
 	mes_add_queue_pkt.gds_size = input->queue_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 826a99acb6fb..d4df424e4514 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -537,8 +537,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp,
struct kfd_process *p, goto out; } - minfo.update_flag = UPDATE_FLAG_CU_MASK; - mutex_lock(&p->mutex); retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 73b07b5f17f1..5e2ee2d1acc4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -24,6 +24,57 @@ #include "kfd_device_queue_manager.h" #include +static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable) +{ + struct mqd_update_info minfo = {0}; + int err; + + if (!q) + return 0; + + if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || + KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0)) + return 0; + + if (enable && q->properties.is_user_cu_masked) + return -EBUSY; + + minfo.update_flag = enable ? UPDATE_FLAG_DBG_WA_ENABLE : UPDATE_FLAG_DBG_WA_DISABLE; + + q->properties.is_dbg_wa = enable; + err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo); + if (err) + q->properties.is_dbg_wa = false; + + return err; +} + +static int kfd_dbg_set_workaround(struct kfd_process *target, bool enable) +{ + struct process_queue_manager *pqm = &target->pqm; + struct process_queue_node *pqn; + int r = 0; + + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + r = kfd_dbg_set_queue_workaround(pqn->q, enable); + if (enable && r) + goto unwind; + } + + return 0; + +unwind: + list_for_each_entry(pqn, &pqm->queues, process_queue_list) + kfd_dbg_set_queue_workaround(pqn->q, false); + + if (enable) + target->runtime_info.runtime_state = r == -EBUSY ? + DEBUG_RUNTIME_STATE_ENABLED_BUSY : + DEBUG_RUNTIME_STATE_ENABLED_ERROR; + + return r; +} + static int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) { uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; @@ -77,6 +128,8 @@ static void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int else kfd_dbg_set_mes_debug_mode(pdd); } + + kfd_dbg_set_workaround(target, false); } int kfd_dbg_trap_disable(struct kfd_process *target) @@ -111,6 +164,10 @@ static int kfd_dbg_trap_activate(struct kfd_process *target) { int i, r = 0; + r = kfd_dbg_set_workaround(target, true); + if (r) + return r; + for (i = 0; i < target->n_pdds; i++) { struct kfd_process_device *pdd = target->pdds[i]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 17481f824647..3e56225f6ef6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -31,7 +31,8 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, uint32_t *runtime_info_size); static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) { - return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2); + return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || + KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0); } /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 495c9238254e..44d87943e40a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -226,6 +226,10 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, queue_input.paging = false; queue_input.tba_addr = qpd->tba_addr; queue_input.tma_addr = qpd->tma_addr; + queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || + KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0) || + q->properties.is_dbg_wa; + 
queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled; queue_type = convert_to_mes_queue_type(q->properties.type); if (queue_type < 0) { @@ -1716,6 +1720,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, * updates the is_evicted flag but is a no-op otherwise. */ q->properties.is_evicted = !!qpd->evicted; + q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled && + KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(q->device) < IP_VERSION(12, 0, 0); if (qd) mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index eb11940bec34..65c9f01a1f86 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -48,8 +48,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct cik_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index eaaa4f4ddaaa..a0ac4f2fe6b5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -48,8 +48,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct v10_compute_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 3a48bbc589fe..9a9b4e853516 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -46,15 +46,33 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, { struct v11_compute_mqd *m; uint32_t se_mask[KFD_MAX_NUM_SE] = {0}; + bool has_wa_flag = minfo && (minfo->update_flag & (UPDATE_FLAG_DBG_WA_ENABLE | + UPDATE_FLAG_DBG_WA_DISABLE)); - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !(has_wa_flag || minfo->cu_mask.ptr)) return; + m = get_mqd(mqd); + + if (has_wa_flag) { + uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ? + 0xffff : 0xffffffff; + + m->compute_static_thread_mgmt_se0 = wa_mask; + m->compute_static_thread_mgmt_se1 = wa_mask; + m->compute_static_thread_mgmt_se2 = wa_mask; + m->compute_static_thread_mgmt_se3 = wa_mask; + m->compute_static_thread_mgmt_se4 = wa_mask; + m->compute_static_thread_mgmt_se5 = wa_mask; + m->compute_static_thread_mgmt_se6 = wa_mask; + m->compute_static_thread_mgmt_se7 = wa_mask; + + return; + } + mqd_symmetrically_map_cu_mask(mm, minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); - m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; m->compute_static_thread_mgmt_se1 = se_mask[1]; m->compute_static_thread_mgmt_se2 = se_mask[2]; @@ -109,6 +127,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, uint64_t addr; struct v11_compute_mqd *m; int size; + uint32_t wa_mask = q->is_dbg_wa ? 
0xffff : 0xffffffff; m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr; addr = mqd_mem_obj->gpu_addr; @@ -122,14 +141,15 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->header = 0xC0310800; m->compute_pipelinestat_enable = 1; - m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF; + + m->compute_static_thread_mgmt_se0 = wa_mask; + m->compute_static_thread_mgmt_se1 = wa_mask; + m->compute_static_thread_mgmt_se2 = wa_mask; + m->compute_static_thread_mgmt_se3 = wa_mask; + m->compute_static_thread_mgmt_se4 = wa_mask; + m->compute_static_thread_mgmt_se5 = wa_mask; + m->compute_static_thread_mgmt_se6 = wa_mask; + m->compute_static_thread_mgmt_se7 = wa_mask; m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index b7c95158d4a0..5b87c244e909 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -65,8 +65,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct v9_mqd *m; uint32_t se_mask[KFD_MAX_NUM_SE] = {0}; - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index fe69492b1bb3..d1e962da51dd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -51,8 +51,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct vi_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 8fca7175daab..f0a45d184c8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -513,6 +513,8 @@ struct queue_properties { bool is_active; bool is_gws; uint32_t pm4_target_xcc; + bool is_dbg_wa; + bool is_user_cu_masked; /* Not relevant for user mode queues in cp scheduling */ unsigned int vmid; /* Relevant only for sdma queues*/ @@ -535,7 +537,8 @@ struct queue_properties { !(q).is_evicted) enum mqd_update_flag { - UPDATE_FLAG_CU_MASK = 0, + UPDATE_FLAG_DBG_WA_ENABLE = 1, + UPDATE_FLAG_DBG_WA_DISABLE = 2, }; struct mqd_update_info { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index b100933340d2..43d432b5c5bc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -506,8 +506,12 @@ int pqm_update_mqd(struct process_queue_manager *pqm, return -EFAULT; } + /* CUs are masked for debugger requirements so deny user mask */ + if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr) + return -EBUSY; + /* ASICs that have WGPs must enforce pairwise 
enabled mask checks. */
-	if (minfo && minfo->update_flag == UPDATE_FLAG_CU_MASK && minfo->cu_mask.ptr &&
+	if (minfo && minfo->cu_mask.ptr &&
 			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
 		int i;
@@ -526,6 +530,9 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
 	if (retval != 0)
 		return retval;
+	if (minfo && minfo->cu_mask.ptr)
+		pqn->q->properties.is_user_cu_masked = true;
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 3def25b2bdbb..faa7939f35bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1863,10 +1863,13 @@ static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *de
 {
 	bool firmware_supported = true;
+	/*
+	 * FIXME: GFX11 FW currently not sufficient to deal with CWSR WA.
+	 * Updated FW with API changes coming soon.
+	 */
 	if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) &&
 	    KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) {
-		firmware_supported =
-			(dev->gpu->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 9;
+		firmware_supported = false;
 		goto out;
 	}

From 44b87bb0836c65d1b9d21b01503eb6e9b9297771 Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Wed, 6 Apr 2022 12:03:31 -0400
Subject: [PATCH 760/861] drm/amdkfd: add raise exception event function

Exception events can be generated from interrupts or queue activity.
The raise event function will save the exception status of a queue,
device or process, then notify the debugger of the status change by
writing to a debugger-polled file descriptor that the debugger provides
during debug attach.

For memory violation exceptions, extra exception data will be saved.

The debugger will be able to query the saved exception states by a
query operation that will be provided by follow up patches.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 104 +++++++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 7 ++
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 10 +++
 drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2 +
 4 files changed, 123 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 5e2ee2d1acc4..dccb27fc764b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -24,6 +24,107 @@
 #include "kfd_device_queue_manager.h"
 #include 
+void debug_event_write_work_handler(struct work_struct *work)
+{
+	struct kfd_process *process;
+
+	static const char write_data = '.';
+	loff_t pos = 0;
+
+	process = container_of(work,
+			struct kfd_process,
+			debug_event_workarea);
+
+	kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
+}
+
+/* update process/device/queue exception status, write to descriptor
+ * only if exception_status is enabled.
+ */ +bool kfd_dbg_ev_raise(uint64_t event_mask, + struct kfd_process *process, struct kfd_node *dev, + unsigned int source_id, bool use_worker, + void *exception_data, size_t exception_data_size) +{ + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + int i; + static const char write_data = '.'; + loff_t pos = 0; + bool is_subscribed = true; + + if (!(process && process->debug_trap_enabled)) + return false; + + mutex_lock(&process->event_mutex); + + if (event_mask & KFD_EC_MASK_DEVICE) { + for (i = 0; i < process->n_pdds; i++) { + struct kfd_process_device *pdd = process->pdds[i]; + + if (pdd->dev != dev) + continue; + + pdd->exception_status |= event_mask & KFD_EC_MASK_DEVICE; + + if (event_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) { + if (!pdd->vm_fault_exc_data) { + pdd->vm_fault_exc_data = kmemdup( + exception_data, + exception_data_size, + GFP_KERNEL); + if (!pdd->vm_fault_exc_data) + pr_debug("Failed to allocate exception data memory"); + } else { + pr_debug("Debugger exception data not saved\n"); + print_hex_dump_bytes("exception data: ", + DUMP_PREFIX_OFFSET, + exception_data, + exception_data_size); + } + } + break; + } + } else if (event_mask & KFD_EC_MASK_PROCESS) { + process->exception_status |= event_mask & KFD_EC_MASK_PROCESS; + } else { + pqm = &process->pqm; + list_for_each_entry(pqn, &pqm->queues, + process_queue_list) { + int target_id; + + if (!pqn->q) + continue; + + target_id = event_mask & KFD_EC_MASK(EC_QUEUE_NEW) ? + pqn->q->properties.queue_id : + pqn->q->doorbell_id; + + if (pqn->q->device != dev || target_id != source_id) + continue; + + pqn->q->properties.exception_status |= event_mask; + break; + } + } + + if (process->exception_enable_mask & event_mask) { + if (use_worker) + schedule_work(&process->debug_event_workarea); + else + kernel_write(process->dbg_ev_file, + &write_data, + 1, + &pos); + } else { + is_subscribed = false; + } + + mutex_unlock(&process->event_mutex); + + return is_subscribed; +} + static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable) { struct mqd_update_info minfo = {0}; @@ -99,6 +200,9 @@ static void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int { int i; + if (!unwind) + cancel_work_sync(&target->debug_event_workarea); + for (i = 0; i < target->n_pdds; i++) { struct kfd_process_device *pdd = target->pdds[i]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 3e56225f6ef6..66ee7b95d08a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -25,6 +25,11 @@ #include "kfd_priv.h" +bool kfd_dbg_ev_raise(uint64_t event_mask, + struct kfd_process *process, struct kfd_node *dev, + unsigned int source_id, bool use_worker, + void *exception_data, + size_t exception_data_size); int kfd_dbg_trap_disable(struct kfd_process *target); int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, void __user *runtime_info, @@ -35,6 +40,8 @@ static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0); } +void debug_event_write_work_handler(struct work_struct *work); + /* * If GFX off is enabled, chips that do not support RLC restore for the debug * registers will disable GFX off temporarily for the entire debug session. 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f0a45d184c8f..b18cd4bf76bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -529,6 +529,7 @@ struct queue_properties { uint32_t ctl_stack_size; uint64_t tba_addr; uint64_t tma_addr; + uint64_t exception_status; }; #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \ @@ -820,6 +821,11 @@ struct kfd_process_device { uint64_t page_in; uint64_t page_out; + /* Exception code status*/ + uint64_t exception_status; + void *vm_fault_exc_data; + size_t vm_fault_exc_data_size; + /* Tracks debug per-vmid request settings */ uint32_t spi_dbg_override; uint32_t spi_dbg_launch_mode; @@ -955,12 +961,16 @@ struct kfd_process { /* Exception code enable mask and status */ uint64_t exception_enable_mask; + uint64_t exception_status; /* shared virtual memory registered by this process */ struct svm_range_list svms; bool xnack_enabled; + /* Work area for debugger event writer worker. */ + struct work_struct debug_event_workarea; + /* Tracks debug per-vmid request for debug flags */ bool dbg_flags; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index e77cadadb09b..f904d6d6e01c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1509,6 +1509,8 @@ static struct kfd_process *create_process(const struct task_struct *thread) kfd_unref_process(process); get_task_struct(process->lead_thread); + INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler); + return process; err_register_notifier: From c2d2588c702364ff53916ddd97e2b26fd4f4a317 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 8 Apr 2022 12:49:48 -0400 Subject: [PATCH 761/861] drm/amdkfd: add send exception operation Add a debug operation that allows the debugger to send an exception directly to runtime through a payload address. For memory violations, normal vmfault signals will be applied to notify runtime instead after passing in the saved exception data when a memory violation was raised to the debugger. For runtime exceptions, this will unblock the runtime enable function which will be explained and implemented in a follow up patch. 
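From the debugger's side this is a single debug-trap ioctl. A hedged user
space sketch follows (argument layout per the kfd_ioctl_dbg_trap_args union
used by this series; pid, fd and id values are placeholders):

/* Sketch: raise EC_PROCESS_RUNTIME toward the runtime, unblocking the
 * target's runtime enable call (see the follow up runtime enable patch).
 */
struct kfd_ioctl_dbg_trap_args args = {0};

args.pid = target_pid;
args.op = KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT;
args.send_runtime_event.gpu_id = gpu_id;
args.send_runtime_event.queue_id = queue_id;
args.send_runtime_event.exception_mask = KFD_EC_MASK(EC_PROCESS_RUNTIME);

if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args) == -1)
	perror("AMDKFD_IOC_DBG_TRAP");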
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/cik_event_interrupt.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 43 +++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 6 ++ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 3 +- .../gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 7 +- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 71 ++++++++++++++++++- 8 files changed, 135 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 4ebfff6b6c55..795382b55e0a 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -118,9 +118,9 @@ static void cik_event_interrupt_wq(struct kfd_node *dev, return; if (info.vmid == vmid) - kfd_signal_vm_fault_event(dev, pasid, &info); + kfd_signal_vm_fault_event(dev, pasid, &info, NULL); else - kfd_signal_vm_fault_event(dev, pasid, NULL); + kfd_signal_vm_fault_event(dev, pasid, NULL, NULL); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index d4df424e4514..5e57b3e96ff9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2833,6 +2833,11 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v r = kfd_dbg_trap_disable(target); break; case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT: + r = kfd_dbg_send_exception_to_runtime(target, + args->send_runtime_event.gpu_id, + args->send_runtime_event.queue_id, + args->send_runtime_event.exception_mask); + break; case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED: case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index dccb27fc764b..61098975bb0e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -125,6 +125,49 @@ bool kfd_dbg_ev_raise(uint64_t event_mask, return is_subscribed; } +int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, + unsigned int dev_id, + unsigned int queue_id, + uint64_t error_reason) +{ + if (error_reason & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) { + struct kfd_process_device *pdd = NULL; + struct kfd_hsa_memory_exception_data *data; + int i; + + for (i = 0; i < p->n_pdds; i++) { + if (p->pdds[i]->dev->id == dev_id) { + pdd = p->pdds[i]; + break; + } + } + + if (!pdd) + return -ENODEV; + + data = (struct kfd_hsa_memory_exception_data *) + pdd->vm_fault_exc_data; + + kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid); + kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data); + error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION); + } + + if (error_reason & (KFD_EC_MASK(EC_PROCESS_RUNTIME))) { + /* + * block should only happen after the debugger receives runtime + * enable notice. 
+ */ + up(&p->runtime_enable_sema); + error_reason &= ~KFD_EC_MASK(EC_PROCESS_RUNTIME); + } + + if (error_reason) + return kfd_send_exception_to_runtime(p, queue_id, error_reason); + + return 0; +} + static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable) { struct mqd_update_info minfo = {0}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 66ee7b95d08a..2c6866bb8850 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -34,6 +34,12 @@ int kfd_dbg_trap_disable(struct kfd_process *target); int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, void __user *runtime_info, uint32_t *runtime_info_size); + +int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, + unsigned int dev_id, + unsigned int queue_id, + uint64_t error_reason); + static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) { return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 9926186f88a6..0a5e7b172a64 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -1222,7 +1222,8 @@ void kfd_signal_hw_exception_event(u32 pasid) } void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, - struct kfd_vm_fault_info *info) + struct kfd_vm_fault_info *info, + struct kfd_hsa_memory_exception_data *data) { struct kfd_event *ev; uint32_t id; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 861bccb1e9dc..8cf58be80f4e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -362,7 +362,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, kfd_smi_event_update_vmfault(dev, pasid); kfd_dqm_evict_pasid(dev->dqm, pasid); - kfd_signal_vm_fault_event(dev, pasid, &info); + kfd_signal_vm_fault_event(dev, pasid, &info, NULL); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b18cd4bf76bf..58b82fa59584 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -979,6 +979,7 @@ struct kfd_process { bool queues_paused; /* Tracks runtime enable status */ + struct semaphore runtime_enable_sema; struct kfd_runtime_info runtime_info; }; @@ -1447,7 +1448,8 @@ int kfd_get_num_events(struct kfd_process *p); int kfd_event_destroy(struct kfd_process *p, uint32_t event_id); void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, - struct kfd_vm_fault_info *info); + struct kfd_vm_fault_info *info, + struct kfd_hsa_memory_exception_data *data); void kfd_signal_reset_event(struct kfd_node *dev); @@ -1463,6 +1465,9 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0); } +int kfd_send_exception_to_runtime(struct kfd_process *p, + unsigned int queue_id, + uint64_t error_reason); bool kfd_is_locked(void); /* Compute profile */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index f904d6d6e01c..5cbfcaf08c8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1462,6 +1462,7 @@ static struct kfd_process *create_process(const struct task_struct *thread) process->debugger_process = NULL; process->exception_enable_mask = 0; atomic_set(&process->debugged_process_count, 0); + 
sema_init(&process->runtime_enable_sema, 0);
 	process->pasid = kfd_pasid_alloc();
 	if (process->pasid == 0) {
@@ -2120,6 +2121,75 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
 	}
 }
+struct send_exception_work_handler_workarea {
+	struct work_struct work;
+	struct kfd_process *p;
+	unsigned int queue_id;
+	uint64_t error_reason;
+};
+
+static void send_exception_work_handler(struct work_struct *work)
+{
+	struct send_exception_work_handler_workarea *workarea;
+	struct kfd_process *p;
+	struct queue *q;
+	struct mm_struct *mm;
+	struct kfd_context_save_area_header __user *csa_header;
+	uint64_t __user *err_payload_ptr;
+	uint64_t cur_err;
+	uint32_t ev_id;
+
+	workarea = container_of(work,
+				struct send_exception_work_handler_workarea,
+				work);
+	p = workarea->p;
+
+	mm = get_task_mm(p->lead_thread);
+
+	if (!mm)
+		return;
+
+	kthread_use_mm(mm);
+
+	q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
+
+	if (!q)
+		goto out;
+
+	csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
+
+	get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
+	get_user(cur_err, err_payload_ptr);
+	cur_err |= workarea->error_reason;
+	put_user(cur_err, err_payload_ptr);
+	get_user(ev_id, &csa_header->err_event_id);
+
+	kfd_set_event(p, ev_id);
+
+out:
+	kthread_unuse_mm(mm);
+	mmput(mm);
+}
+
+int kfd_send_exception_to_runtime(struct kfd_process *p,
+			unsigned int queue_id,
+			uint64_t error_reason)
+{
+	struct send_exception_work_handler_workarea worker;
+
+	INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
+
+	worker.p = p;
+	worker.queue_id = queue_id;
+	worker.error_reason = error_reason;
+
+	schedule_work(&worker.work);
+	flush_work(&worker.work);
+	destroy_work_on_stack(&worker.work);
+
+	return 0;
+}
+
 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
 {
 	int i;
@@ -2179,4 +2249,3 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
 }
 #endif
-

From 455227c4642c5e1867213cea73a527e431779060 Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Fri, 8 Apr 2022 13:12:24 -0400
Subject: [PATCH 762/861] drm/amdkfd: add runtime enable operation

The debugger can attach to a process prior to HSA enablement (i.e. the
inferior is spawned by the debugger and attached to immediately before
the target process has been enabled for HSA dispatches) or it can
attach to a running target that is already HSA enabled. Either way,
the debugger needs to know the enablement status to know when it can
inspect queues.

For the scenario where the debugger spawns the target process, it will
have to wait for ROCr's runtime enable request from the target. The
runtime enable request will be able to see that its process has been
debug attached. ROCr raises an EC_PROCESS_RUNTIME signal to the
debugger then blocks the target process while waiting for the
debugger's response. Once the debugger has received the runtime signal,
it will unblock the target process.

For the scenario where the debugger attaches to a running target
process, ROCr will set the target process' runtime status as enabled so
that on an attach request, the debugger will be able to see this status
and will continue with debug enablement as normal.

A secondary requirement is to conditionally enable the trap temporaries
only if the user requests it (env var HSA_ENABLE_DEBUG=1) or if the
debugger attaches with HSA runtime enabled.
This is because setting up the trap temporaries incurs a performance
overhead that is unacceptable for microbench performance in normal mode
for certain customers.

In the scenario where the debugger spawns the target process, when ROCr
detects that the debugger has attached during the runtime enable
request, it will enable the trap temporaries before it blocks the
target process while waiting for the debugger to respond.

In the scenario where the debugger attaches to a running target
process, it will enable the trap temporaries itself.

Finally, there is an additional restriction that is required to be
enforced with runtime enable and HW debug mode setting. The debugger
must first ensure that HW debug mode has been enabled before permitting
HW debug mode operations.

With single process debug devices, allowing the debugger to set debug
HW modes prior to trap activation means that debug HW mode setting can
occur before the KFD has reserved the debug VMID (0xf) from the
hardware scheduler's VMID allocation resource pool. This can result in
the hardware scheduler assigning VMID 0xf to a non-debugged process
and having that process inherit debug HW mode settings intended for
the debugged target process instead, which is both incorrect and
potentially fatal for normal mode operation.

With multi-process debug devices, allowing the debugger to set debug
HW modes prior to trap activation means that non-debugged processes
migrating to a new VMID could inherit unintended debug settings.

All debug operations that touch HW settings must require trap
activation, where trap activation is triggered by both debug attach and
runtime enablement (target has KFD opened and is ready to dispatch
work).

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 143 ++++++++++++++++++++++-
 drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 6 +-
 drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 4 +
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 +
 4 files changed, 150 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5e57b3e96ff9..615fa9ab36b7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -2738,9 +2738,138 @@ static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
 	return ret;
 }
+static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
+			bool enable_ttmp_setup)
+{
+	int i = 0, ret = 0;
+
+	if (p->is_runtime_retry)
+		goto retry;
+
+	if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
+		return -EBUSY;
+
+	for (i = 0; i < p->n_pdds; i++) {
+		struct kfd_process_device *pdd = p->pdds[i];
+
+		if (pdd->qpd.queue_count)
+			return -EEXIST;
+	}
+
+	p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+	p->runtime_info.r_debug = r_debug;
+	p->runtime_info.ttmp_setup = enable_ttmp_setup;
+
+	if (p->runtime_info.ttmp_setup) {
+		for (i = 0; i < p->n_pdds; i++) {
+			struct kfd_process_device *pdd = p->pdds[i];
+
+			if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
+				amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+				pdd->dev->kfd2kgd->enable_debug_trap(
+						pdd->dev->adev,
+						true,
+						pdd->dev->vm_info.last_vmid_kfd);
+			} else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+				pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
+						pdd->dev->adev,
+						false,
+						0);
+			}
+		}
+	}
+
+retry:
+	if (p->debug_trap_enabled) {
+		if (!p->is_runtime_retry) {
+			kfd_dbg_trap_activate(p);
+
kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME), + p, NULL, 0, false, NULL, 0); + } + + mutex_unlock(&p->mutex); + ret = down_interruptible(&p->runtime_enable_sema); + mutex_lock(&p->mutex); + + p->is_runtime_retry = !!ret; + } + + return ret; +} + +static int runtime_disable(struct kfd_process *p) +{ + int i = 0, ret; + bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED; + + p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED; + p->runtime_info.r_debug = 0; + + if (p->debug_trap_enabled) { + if (was_enabled) + kfd_dbg_trap_deactivate(p, false, 0); + + if (!p->is_runtime_retry) + kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME), + p, NULL, 0, false, NULL, 0); + + mutex_unlock(&p->mutex); + ret = down_interruptible(&p->runtime_enable_sema); + mutex_lock(&p->mutex); + + p->is_runtime_retry = !!ret; + if (ret) + return ret; + } + + if (was_enabled && p->runtime_info.ttmp_setup) { + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + + if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + } + } + + p->runtime_info.ttmp_setup = false; + + /* disable ttmp setup */ + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + + if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { + pdd->spi_dbg_override = + pdd->dev->kfd2kgd->disable_debug_trap( + pdd->dev->adev, + false, + pdd->dev->vm_info.last_vmid_kfd); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + debug_refresh_runlist(pdd->dev->dqm); + else + kfd_dbg_set_mes_debug_mode(pdd); + } + } + + return 0; +} + static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data) { - return 0; + struct kfd_ioctl_runtime_enable_args *args = data; + int r; + + mutex_lock(&p->mutex); + + if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK) + r = runtime_enable(p, args->r_debug, + !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK)); + else + r = runtime_disable(p); + + mutex_unlock(&p->mutex); + + return r; } static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data) @@ -2815,6 +2944,18 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v goto unlock_out; } + if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED && + (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE || + args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE || + args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES || + args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES || + args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || + args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH || + args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) { + r = -EPERM; + goto unlock_out; + } + switch (args->op) { case KFD_IOC_DBG_TRAP_ENABLE: if (target != p) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 61098975bb0e..a19c21d04438 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -219,7 +219,7 @@ unwind: return r; } -static int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) +int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) { uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; uint32_t flags = pdd->process->dbg_flags; @@ -239,7 +239,7 @@ static int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) * to unwind * else: ignored */ -static void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count) 
+void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count) { int i; @@ -307,7 +307,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target) return 0; } -static int kfd_dbg_trap_activate(struct kfd_process *target) +int kfd_dbg_trap_activate(struct kfd_process *target) { int i, r = 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 2c6866bb8850..fca928564948 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -25,6 +25,8 @@ #include "kfd_priv.h" +void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count); +int kfd_dbg_trap_activate(struct kfd_process *target); bool kfd_dbg_ev_raise(uint64_t event_mask, struct kfd_process *process, struct kfd_node *dev, unsigned int source_id, bool use_worker, @@ -77,4 +79,6 @@ static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev) /* Assume debugging and cooperative launch supported otherwise. */ return true; } + +int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 58b82fa59584..4b80f74b9de0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -980,6 +980,7 @@ struct kfd_process { /* Tracks runtime enable status */ struct semaphore runtime_enable_sema; + bool is_runtime_retry; struct kfd_runtime_info runtime_info; }; From 50cff45e274896235d371f16eab67a180e12a732 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Mon, 1 Mar 2021 18:34:39 -0600 Subject: [PATCH 763/861] drm/amdkfd: add debug trap enabled flag to tma Trap handler behavior will differ when a debugger is attached. Make the debug trap flag available in the trap handler TMA. Update it when the debug trap ioctl is invoked. Signed-off-by: Jay Cornwall Reviewed-by: Felix Kuehling Signed-off-by: Jonathan Kim Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 11 +++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 15 +++++++++++++++ 3 files changed, 28 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index a19c21d04438..17e8e9edccbf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -256,6 +256,8 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind if (unwind && i == unwind_count) break; + kfd_process_set_trap_debug_flag(&pdd->qpd, false); + /* GFX off is already disabled by debug activate if not RLC restore supported. */ if (kfd_dbg_is_rlc_restore_supported(pdd->dev)) amdgpu_gfx_off_ctrl(pdd->dev->adev, false); @@ -351,6 +353,15 @@ int kfd_dbg_trap_activate(struct kfd_process *target) if (kfd_dbg_is_rlc_restore_supported(pdd->dev)) amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + /* + * Setting the debug flag in the trap handler requires that the TMA has been + * allocated, which occurs during CWSR initialization. + * In the event that CWSR has not been initialized at this point, setting the + * flag will be called again during CWSR initialization if the target process + * is still debug enabled. 
+ */ + kfd_process_set_trap_debug_flag(&pdd->qpd, true); + if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4b80f74b9de0..a02fb939614a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1157,6 +1157,8 @@ int kfd_init_apertures(struct kfd_process *process); void kfd_process_set_trap_handler(struct qcm_process_device *qpd, uint64_t tba_addr, uint64_t tma_addr); +void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd, + bool enabled); /* CWSR initialization */ int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 5cbfcaf08c8f..3b7f219c9d06 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1309,6 +1309,8 @@ int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size); + kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled); + qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); @@ -1345,6 +1347,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size); + kfd_process_set_trap_debug_flag(&pdd->qpd, + pdd->process->debug_trap_enabled); + qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); @@ -1431,6 +1436,16 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) return true; } +void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd, + bool enabled) +{ + if (qpd->cwsr_kaddr) { + uint64_t *tma = + (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET); + tma[2] = enabled; + } +} + /* * On return the kfd_process is fully operational and will be freed when the * mm is released From a37d23f816b18a324c24d066d5bc453308913bf9 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 11 Apr 2023 11:25:52 +0800 Subject: [PATCH 764/861] drm/amd/pm: update SMU13 header files for coming OD support Correct the data structures for OD feature support. 
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h | 16 +++++----- .../inc/pmfw_if/smu13_driver_if_v13_0_0.h | 18 +++++++----- .../inc/pmfw_if/smu13_driver_if_v13_0_7.h | 29 ++++--------------- .../amd/pm/swsmu/inc/smu_v13_0_7_pptable.h | 16 +++++----- 4 files changed, 34 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h index 566a0da59e53..1dc7a065a6d4 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h @@ -38,13 +38,12 @@ #define SMU_13_0_0_PP_THERMALCONTROLLER_NONE 0 #define SMU_13_0_0_PP_THERMALCONTROLLER_NAVI21 28 -#define SMU_13_0_0_PP_OVERDRIVE_VERSION 0x81 // OverDrive 8 Table Version 0.2 +#define SMU_13_0_0_PP_OVERDRIVE_VERSION 0x83 // OverDrive 8 Table Version 0.2 #define SMU_13_0_0_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 enum SMU_13_0_0_ODFEATURE_CAP { SMU_13_0_0_ODCAP_GFXCLK_LIMITS = 0, - SMU_13_0_0_ODCAP_GFXCLK_CURVE, SMU_13_0_0_ODCAP_UCLK_LIMITS, SMU_13_0_0_ODCAP_POWER_LIMIT, SMU_13_0_0_ODCAP_FAN_ACOUSTIC_LIMIT, @@ -59,13 +58,13 @@ enum SMU_13_0_0_ODFEATURE_CAP SMU_13_0_0_ODCAP_FAN_CURVE, SMU_13_0_0_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, SMU_13_0_0_ODCAP_POWER_MODE, + SMU_13_0_0_ODCAP_PER_ZONE_GFX_VOLTAGE_OFFSET, SMU_13_0_0_ODCAP_COUNT, }; enum SMU_13_0_0_ODFEATURE_ID { SMU_13_0_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature - SMU_13_0_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_13_0_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature SMU_13_0_0_ODFEATURE_UCLK_LIMITS = 1 << SMU_13_0_0_ODCAP_UCLK_LIMITS, //UCLK Limit feature SMU_13_0_0_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_0_ODCAP_POWER_LIMIT, //Power Limit feature SMU_13_0_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature @@ -80,6 +79,7 @@ enum SMU_13_0_0_ODFEATURE_ID SMU_13_0_0_ODFEATURE_FAN_CURVE = 1 << SMU_13_0_0_ODCAP_FAN_CURVE, //Fan Curve feature SMU_13_0_0_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_0_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, //Auto Fan Acoustic RPM feature SMU_13_0_0_ODFEATURE_POWER_MODE = 1 << SMU_13_0_0_ODCAP_POWER_MODE, //Optimized GPU Power Mode feature + SMU_13_0_0_ODFEATURE_PER_ZONE_GFX_VOLTAGE_OFFSET = 1 << SMU_13_0_0_ODCAP_PER_ZONE_GFX_VOLTAGE_OFFSET, //Perzone voltage offset feature SMU_13_0_0_ODFEATURE_COUNT = 16, }; @@ -89,10 +89,6 @@ enum SMU_13_0_0_ODSETTING_ID { SMU_13_0_0_ODSETTING_GFXCLKFMAX = 0, SMU_13_0_0_ODSETTING_GFXCLKFMIN, - SMU_13_0_0_ODSETTING_CUSTOM_GFX_VF_CURVE_A, - SMU_13_0_0_ODSETTING_CUSTOM_GFX_VF_CURVE_B, - SMU_13_0_0_ODSETTING_CUSTOM_GFX_VF_CURVE_C, - SMU_13_0_0_ODSETTING_CUSTOM_CURVE_VFT_FMIN, SMU_13_0_0_ODSETTING_UCLKFMIN, SMU_13_0_0_ODSETTING_UCLKFMAX, SMU_13_0_0_ODSETTING_POWERPERCENTAGE, @@ -117,6 +113,12 @@ enum SMU_13_0_0_ODSETTING_ID SMU_13_0_0_ODSETTING_FAN_CURVE_SPEED_5, SMU_13_0_0_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT, SMU_13_0_0_ODSETTING_POWER_MODE, + SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_1, + SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_2, + SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_3, + SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_4, + SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_5, + SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_6, SMU_13_0_0_ODSETTING_COUNT, }; #define SMU_13_0_0_MAX_ODSETTING 64 //Maximum Number of ODSettings diff --git 
a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h index fe995651c6f5..e656e82a0154 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h @@ -667,7 +667,14 @@ typedef enum { #define PP_NUM_RTAVFS_PWL_ZONES 5 - +#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0 +#define PP_OD_FEATURE_PPT_BIT 2 +#define PP_OD_FEATURE_FAN_CURVE_BIT 3 +#define PP_OD_FEATURE_GFXCLK_BIT 7 +#define PP_OD_FEATURE_UCLK_BIT 8 +#define PP_OD_FEATURE_ZERO_FAN_BIT 9 +#define PP_OD_FEATURE_TEMPERATURE_BIT 10 +#define PP_OD_FEATURE_COUNT 13 // VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3 // Slope Q1.7, Offset Q1.2 @@ -689,10 +696,8 @@ typedef struct { //Voltage control int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; - uint16_t VddGfxVmax; // in mV - uint8_t IdlePwrSavingFeaturesCtrl; - uint8_t RuntimePwrSavingFeaturesCtrl; + uint32_t Reserved; //Frequency changes int16_t GfxclkFmin; // MHz @@ -729,10 +734,9 @@ typedef struct { uint32_t FeatureCtrlMask; int16_t VoltageOffsetPerZoneBoundary; - uint16_t VddGfxVmax; // in mV + uint16_t Reserved1; - uint8_t IdlePwrSavingFeaturesCtrl; - uint8_t RuntimePwrSavingFeaturesCtrl; + uint16_t Reserved2; int16_t GfxclkFmin; // MHz int16_t GfxclkFmax; // MHz diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h index 44e879c51cae..62b7c0daff68 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h @@ -683,18 +683,12 @@ typedef struct { #define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0 -#define PP_OD_FEATURE_VMAX_BIT 1 #define PP_OD_FEATURE_PPT_BIT 2 #define PP_OD_FEATURE_FAN_CURVE_BIT 3 -#define PP_OD_FEATURE_FREQ_DETER_BIT 4 -#define PP_OD_FEATURE_FULL_CTRL_BIT 5 -#define PP_OD_FEATURE_TDC_BIT 6 #define PP_OD_FEATURE_GFXCLK_BIT 7 #define PP_OD_FEATURE_UCLK_BIT 8 #define PP_OD_FEATURE_ZERO_FAN_BIT 9 #define PP_OD_FEATURE_TEMPERATURE_BIT 10 -#define PP_OD_FEATURE_POWER_FEATURE_CTRL_BIT 11 -#define PP_OD_FEATURE_ASIC_TDC_BIT 12 #define PP_OD_FEATURE_COUNT 13 typedef enum { @@ -713,10 +707,8 @@ typedef struct { //Voltage control int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; - uint16_t VddGfxVmax; // in mV - uint8_t IdlePwrSavingFeaturesCtrl; - uint8_t RuntimePwrSavingFeaturesCtrl; + uint32_t Reserved; //Frequency changes int16_t GfxclkFmin; // MHz @@ -741,12 +733,7 @@ typedef struct { uint8_t MaxOpTemp; uint8_t Padding[4]; - uint16_t GfxVoltageFullCtrlMode; - uint16_t GfxclkFullCtrlMode; - uint16_t UclkFullCtrlMode; - int16_t AsicTdc; - - uint32_t Spare[10]; + uint32_t Spare[12]; uint32_t MmHubPadding[8]; // SMU internal use. 
Adding here instead of external as a workaround } OverDriveTable_t; @@ -759,10 +746,9 @@ typedef struct { uint32_t FeatureCtrlMask; int16_t VoltageOffsetPerZoneBoundary; - uint16_t VddGfxVmax; // in mV + uint16_t Reserved1; - uint8_t IdlePwrSavingFeaturesCtrl; - uint8_t RuntimePwrSavingFeaturesCtrl; + uint16_t Reserved2; int16_t GfxclkFmin; // MHz int16_t GfxclkFmax; // MHz @@ -785,12 +771,7 @@ typedef struct { uint8_t MaxOpTemp; uint8_t Padding[4]; - uint16_t GfxVoltageFullCtrlMode; - uint16_t GfxclkFullCtrlMode; - uint16_t UclkFullCtrlMode; - int16_t AsicTdc; - - uint32_t Spare[10]; + uint32_t Spare[12]; } OverDriveLimits_t; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h index 478862ded0bd..eadbe0149cae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h @@ -38,13 +38,12 @@ #define SMU_13_0_7_PP_THERMALCONTROLLER_NONE 0 #define SMU_13_0_7_PP_THERMALCONTROLLER_NAVI21 28 -#define SMU_13_0_7_PP_OVERDRIVE_VERSION 0x81 // OverDrive 8 Table Version 0.2 +#define SMU_13_0_7_PP_OVERDRIVE_VERSION 0x83 // OverDrive 8 Table Version 0.2 #define SMU_13_0_7_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 enum SMU_13_0_7_ODFEATURE_CAP { SMU_13_0_7_ODCAP_GFXCLK_LIMITS = 0, - SMU_13_0_7_ODCAP_GFXCLK_CURVE, SMU_13_0_7_ODCAP_UCLK_LIMITS, SMU_13_0_7_ODCAP_POWER_LIMIT, SMU_13_0_7_ODCAP_FAN_ACOUSTIC_LIMIT, @@ -59,13 +58,13 @@ enum SMU_13_0_7_ODFEATURE_CAP SMU_13_0_7_ODCAP_FAN_CURVE, SMU_13_0_7_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, SMU_13_0_7_ODCAP_POWER_MODE, + SMU_13_0_7_ODCAP_PER_ZONE_GFX_VOLTAGE_OFFSET, SMU_13_0_7_ODCAP_COUNT, }; enum SMU_13_0_7_ODFEATURE_ID { SMU_13_0_7_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_7_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature - SMU_13_0_7_ODFEATURE_GFXCLK_CURVE = 1 << SMU_13_0_7_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature SMU_13_0_7_ODFEATURE_UCLK_LIMITS = 1 << SMU_13_0_7_ODCAP_UCLK_LIMITS, //UCLK Limit feature SMU_13_0_7_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_7_ODCAP_POWER_LIMIT, //Power Limit feature SMU_13_0_7_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_7_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature @@ -80,6 +79,7 @@ enum SMU_13_0_7_ODFEATURE_ID SMU_13_0_7_ODFEATURE_FAN_CURVE = 1 << SMU_13_0_7_ODCAP_FAN_CURVE, //Fan Curve feature SMU_13_0_7_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_7_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, //Auto Fan Acoustic RPM feature SMU_13_0_7_ODFEATURE_POWER_MODE = 1 << SMU_13_0_7_ODCAP_POWER_MODE, //Optimized GPU Power Mode feature + SMU_13_0_7_ODFEATURE_PER_ZONE_GFX_VOLTAGE_OFFSET = 1 << SMU_13_0_7_ODCAP_PER_ZONE_GFX_VOLTAGE_OFFSET, //Perzone voltage offset feature SMU_13_0_7_ODFEATURE_COUNT = 16, }; @@ -89,10 +89,6 @@ enum SMU_13_0_7_ODSETTING_ID { SMU_13_0_7_ODSETTING_GFXCLKFMAX = 0, SMU_13_0_7_ODSETTING_GFXCLKFMIN, - SMU_13_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_A, - SMU_13_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_B, - SMU_13_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_C, - SMU_13_0_7_ODSETTING_CUSTOM_CURVE_VFT_FMIN, SMU_13_0_7_ODSETTING_UCLKFMIN, SMU_13_0_7_ODSETTING_UCLKFMAX, SMU_13_0_7_ODSETTING_POWERPERCENTAGE, @@ -117,6 +113,12 @@ enum SMU_13_0_7_ODSETTING_ID SMU_13_0_7_ODSETTING_FAN_CURVE_SPEED_5, SMU_13_0_7_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT, SMU_13_0_7_ODSETTING_POWER_MODE, + SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_1, + SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_2, + SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_3, + 
SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_4,
+	SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_5,
+	SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_6,
 	SMU_13_0_7_ODSETTING_COUNT,
 };
 #define SMU_13_0_7_MAX_ODSETTING 64 //Maximum Number of ODSettings

From 12fb1ad70d65edc3405884792d044fa79df7244f Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Fri, 22 Apr 2022 12:26:18 -0400
Subject: [PATCH 765/861] drm/amdkfd: update process interrupt handling for debug events

The debugger must be notified of any debugger-subscribed exception that
comes from a hardware interrupt.

If a debugger session exits, any exceptions it subscribed to may still
have interrupts in the interrupt ring buffer or the KGD/KFD pipeline.
To prevent a new session from inheriting stale interrupts, when a new
queue is created, open an interrupt drain and allow the IH ring to
drain from a timestamped checkpoint. Then inject a custom IV so that
once the custom IV is picked up by the KFD, it's safe to close the
drain and proceed with queue creation.

The drain must also happen on debug disable, as SW interrupts may still
be in flight. Drain at this time and clear all the exception status.

The debugger may also not be attached or subscribed to certain
exceptions, so forward those directly to the runtime.

GFX10 also requires its own IV processing, hence the creation of
kfd_int_process_v10.c. This is because the IVs from SQ interrupts are
packed into a new contiguous format, unlike GFX9. To make this clear, a
separate interrupt handling code file was created.

Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c    |  16 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h    |   2 +
 drivers/gpu/drm/amd/amdkfd/Makefile           |   1 +
 drivers/gpu/drm/amd/amdkfd/kfd_debug.c        |  84 ++++
 drivers/gpu/drm/amd/amdkfd/kfd_debug.h        |   6 +
 drivers/gpu/drm/amd/amdkfd/kfd_device.c       |   4 +-
 .../gpu/drm/amd/amdkfd/kfd_int_process_v10.c  | 405 ++++++++++++++++++
 .../gpu/drm/amd/amdkfd/kfd_int_process_v11.c  |  26 +-
 .../gpu/drm/amd/amdkfd/kfd_int_process_v9.c   | 102 ++++-
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  12 +
 drivers/gpu/drm/amd/amdkfd/kfd_process.c      |  47 ++
 .../amd/amdkfd/kfd_process_queue_manager.c    |   4 +
 12 files changed, 688 insertions(+), 21 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 66f80b9ab0c5..98cd52bb005f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -777,6 +777,22 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bo
 	amdgpu_umc_poison_handler(adev, reset);
 }
 
+int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
+					uint32_t *payload)
+{
+	int ret;
+
+	/* Device or IH ring is not ready so bail.
*/ + ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih); + if (ret) + return ret; + + /* Send payload to fence KFD interrupts */ + amdgpu_amdkfd_interrupt(adev, payload); + + return 0; +} + bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) { if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 94cc456761e5..dd740e64e6e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -250,6 +250,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, struct amdgpu_device *src, bool is_min); int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min); +int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, + uint32_t *payload); /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 747754428073..2ec8f27c5366 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -53,6 +53,7 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_events.o \ $(AMDKFD_PATH)/cik_event_interrupt.o \ $(AMDKFD_PATH)/kfd_int_process_v9.o \ + $(AMDKFD_PATH)/kfd_int_process_v10.o \ $(AMDKFD_PATH)/kfd_int_process_v11.o \ $(AMDKFD_PATH)/kfd_smi_events.o \ $(AMDKFD_PATH)/kfd_crat.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 17e8e9edccbf..68b657398d41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -125,6 +125,64 @@ bool kfd_dbg_ev_raise(uint64_t event_mask, return is_subscribed; } +/* set pending event queue entry from ring entry */ +bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev, + unsigned int pasid, + uint32_t doorbell_id, + uint64_t trap_mask, + void *exception_data, + size_t exception_data_size) +{ + struct kfd_process *p; + bool signaled_to_debugger_or_runtime = false; + + p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return false; + + if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true, + exception_data, exception_data_size)) { + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + + if (!!(trap_mask & KFD_EC_MASK_QUEUE) && + p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) { + mutex_lock(&p->mutex); + + pqm = &p->pqm; + list_for_each_entry(pqn, &pqm->queues, + process_queue_list) { + + if (!(pqn->q && pqn->q->device == dev && + pqn->q->doorbell_id == doorbell_id)) + continue; + + kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id, + trap_mask); + + signaled_to_debugger_or_runtime = true; + + break; + } + + mutex_unlock(&p->mutex); + } else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) { + kfd_dqm_evict_pasid(dev->dqm, p->pasid); + kfd_signal_vm_fault_event(dev, p->pasid, NULL, + exception_data); + + signaled_to_debugger_or_runtime = true; + } + } else { + signaled_to_debugger_or_runtime = true; + } + + kfd_unref_process(p); + + return signaled_to_debugger_or_runtime; +} + int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, unsigned int queue_id, @@ -281,6 +339,31 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind kfd_dbg_set_workaround(target, false); } +static void 
kfd_dbg_clean_exception_status(struct kfd_process *target) +{ + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + int i; + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + kfd_process_drain_interrupts(pdd); + + pdd->exception_status = 0; + } + + pqm = &target->pqm; + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + if (!pqn->q) + continue; + + pqn->q->properties.exception_status = 0; + } + + target->exception_status = 0; +} + int kfd_dbg_trap_disable(struct kfd_process *target) { if (!target->debug_trap_enabled) @@ -304,6 +387,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target) } target->debug_trap_enabled = false; + kfd_dbg_clean_exception_status(target); kfd_unref_process(target); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index fca928564948..5153ccbd7fd1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -27,6 +27,12 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count); int kfd_dbg_trap_activate(struct kfd_process *target); +bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev, + unsigned int pasid, + uint32_t doorbell_id, + uint64_t trap_mask, + void *exception_data, + size_t exception_data_size); bool kfd_dbg_ev_raise(uint64_t event_mask, struct kfd_process *process, struct kfd_node *dev, unsigned int source_id, bool use_worker, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index f0ed6e6416c3..2c36bb578633 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -140,6 +140,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 1): /* ARCTURUS */ case IP_VERSION(9, 4, 2): /* ALDEBARAN */ case IP_VERSION(9, 4, 3): /* GC 9.4.3 */ + kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + break; case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ @@ -153,7 +155,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */ case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */ case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */ - kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + kfd->device_info.event_interrupt_class = &event_interrupt_class_v10; break; case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c new file mode 100644 index 000000000000..c7991e07b6be --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -0,0 +1,405 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "kfd_events.h" +#include "kfd_debug.h" +#include "soc15_int.h" +#include "kfd_device_queue_manager.h" + +/* + * GFX10 SQ Interrupts + * + * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit + * packet to the Interrupt Handler: + * Auto - Generated by the SQG (various cmd overflows, timestamps etc) + * Wave - Generated by S_SENDMSG through a shader program + * Error - HW generated errors (Illegal instructions, Memviols, EDC etc) + * + * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus + * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such: + * + * - context_id1[7:6] + * Encoding type (0 = Auto, 1 = Wave, 2 = Error) + * + * - context_id0[24] + * PRIV bit indicates that Wave S_SEND or error occurred within trap + * + * - context_id0[22:0] + * 23-bit data with the following layout per encoding type: + * Auto - only context_id0[8:0] is used, which reports various interrupts + * generated by SQG. The rest is 0. + * Wave - user data sent from m0 via S_SENDMSG + * Error - Error type (context_id0[22:19]), Error Details (rest of bits) + * + * The other context_id bits show coordinates (SE/SH/CU/SIMD/WGP) for wave + * S_SENDMSG and Errors. These are 0 for Auto. + */ + +enum SQ_INTERRUPT_WORD_ENCODING { + SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0, + SQ_INTERRUPT_WORD_ENCODING_INST, + SQ_INTERRUPT_WORD_ENCODING_ERROR, +}; + +enum SQ_INTERRUPT_ERROR_TYPE { + SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0, + SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST, + SQ_INTERRUPT_ERROR_TYPE_MEMVIOL, + SQ_INTERRUPT_ERROR_TYPE_EDC_FED, +}; + +/* SQ_INTERRUPT_WORD_AUTO_CTXID */ +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE__SHIFT 0 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT__SHIFT 1 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL__SHIFT 2 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL__SHIFT 3 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR__SHIFT 7 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID__SHIFT 4 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING__SHIFT 6 + +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_MASK 0x00000001 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT_MASK 0x00000002 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL_MASK 0x00000004 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL_MASK 0x00000008 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR_MASK 0x00000080 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID_MASK 0x030 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING_MASK 0x0c0 + +/* SQ_INTERRUPT_WORD_WAVE_CTXID */ +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA__SHIFT 0 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID__SHIFT 23 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV__SHIFT 24 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID__SHIFT 25 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID__SHIFT 30 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID__SHIFT 0 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID__SHIFT 4 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT 6 + +#define 
SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA_MASK 0x000007fffff +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID_MASK 0x0000800000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK 0x00001000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID_MASK 0x0003e000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID_MASK 0x000c0000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID_MASK 0x00f +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID_MASK 0x030 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK 0x0c0 + +#define KFD_CTXID0__ERR_TYPE_MASK 0x780000 +#define KFD_CTXID0__ERR_TYPE__SHIFT 19 + +/* GFX10 SQ interrupt ENC type bit (context_id1[7:6]) for wave s_sendmsg */ +#define KFD_CONTEXT_ID1_ENC_TYPE_WAVE_MASK 0x40 +/* GFX10 SQ interrupt PRIV bit (context_id0[24]) for s_sendmsg inside trap */ +#define KFD_CONTEXT_ID0_PRIV_MASK 0x1000000 +/* + * The debugger will send user data(m0) with PRIV=1 to indicate it requires + * notification from the KFD with the following queue id (DOORBELL_ID) and + * trap code (TRAP_CODE). + */ +#define KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK 0x0003ff +#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT 10 +#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK 0x07fc00 +#define KFD_DEBUG_DOORBELL_ID(ctxid0) ((ctxid0) & \ + KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK) +#define KFD_DEBUG_TRAP_CODE(ctxid0) (((ctxid0) & \ + KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK) \ + >> KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT) +#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00 +#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10 +#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \ + KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \ + >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT) + +static void event_interrupt_poison_consumption(struct kfd_node *dev, + uint16_t pasid, uint16_t client_id) +{ + int old_poison, ret = -EINVAL; + struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return; + + /* all queues of a process will be unmapped in one time */ + old_poison = atomic_cmpxchg(&p->poison, 0, 1); + kfd_unref_process(p); + if (old_poison) + return; + + switch (client_id) { + case SOC15_IH_CLIENTID_SE0SH: + case SOC15_IH_CLIENTID_SE1SH: + case SOC15_IH_CLIENTID_SE2SH: + case SOC15_IH_CLIENTID_SE3SH: + case SOC15_IH_CLIENTID_UTCL2: + ret = kfd_dqm_evict_pasid(dev->dqm, pasid); + break; + case SOC15_IH_CLIENTID_SDMA0: + case SOC15_IH_CLIENTID_SDMA1: + case SOC15_IH_CLIENTID_SDMA2: + case SOC15_IH_CLIENTID_SDMA3: + case SOC15_IH_CLIENTID_SDMA4: + break; + default: + break; + } + + kfd_signal_poison_consumed_event(dev, pasid); + + /* resetting queue passes, do page retirement without gpu reset + * resetting queue fails, fallback to gpu reset solution + */ + if (!ret) { + dev_warn(dev->adev->dev, + "RAS poison consumption, unmap queue flow succeeded: client id %d\n", + client_id); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false); + } else { + dev_warn(dev->adev->dev, + "RAS poison consumption, fall back to gpu reset flow: client id %d\n", + client_id); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); + } +} + +static bool event_interrupt_isr_v10(struct kfd_node *dev, + const uint32_t *ih_ring_entry, + uint32_t *patched_ihre, + bool *patched_flag) +{ + uint16_t source_id, client_id, pasid, vmid; + const uint32_t *data = ih_ring_entry; + + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + + /* Only handle interrupts from KFD VMIDs */ + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && + (vmid < 
dev->vm_info.first_vmid_kfd || + vmid > dev->vm_info.last_vmid_kfd)) + return false; + + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + + /* Only handle clients we care about */ + if (client_id != SOC15_IH_CLIENTID_GRBM_CP && + client_id != SOC15_IH_CLIENTID_SDMA0 && + client_id != SOC15_IH_CLIENTID_SDMA1 && + client_id != SOC15_IH_CLIENTID_SDMA2 && + client_id != SOC15_IH_CLIENTID_SDMA3 && + client_id != SOC15_IH_CLIENTID_SDMA4 && + client_id != SOC15_IH_CLIENTID_SDMA5 && + client_id != SOC15_IH_CLIENTID_SDMA6 && + client_id != SOC15_IH_CLIENTID_SDMA7 && + client_id != SOC15_IH_CLIENTID_VMC && + client_id != SOC15_IH_CLIENTID_VMC1 && + client_id != SOC15_IH_CLIENTID_UTCL2 && + client_id != SOC15_IH_CLIENTID_SE0SH && + client_id != SOC15_IH_CLIENTID_SE1SH && + client_id != SOC15_IH_CLIENTID_SE2SH && + client_id != SOC15_IH_CLIENTID_SE3SH) + return false; + + pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", + client_id, source_id, vmid, pasid); + pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", + data[0], data[1], data[2], data[3], + data[4], data[5], data[6], data[7]); + + /* If there is no valid PASID, it's likely a bug */ + if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) + return 0; + + /* Interrupt types we care about: various signals and faults. + * They will be forwarded to a work queue (see below). + */ + return source_id == SOC15_INTSRC_CP_END_OF_PIPE || + source_id == SOC15_INTSRC_SDMA_TRAP || + source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || + source_id == SOC15_INTSRC_CP_BAD_OPCODE || + client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_VMC1 || + client_id == SOC15_IH_CLIENTID_UTCL2 || + KFD_IRQ_IS_FENCE(client_id, source_id); +} + +static void event_interrupt_wq_v10(struct kfd_node *dev, + const uint32_t *ih_ring_entry) +{ + uint16_t source_id, client_id, pasid, vmid; + uint32_t context_id0, context_id1; + uint32_t encoding, sq_intr_err_type; + + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); + context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry); + + if (client_id == SOC15_IH_CLIENTID_GRBM_CP || + client_id == SOC15_IH_CLIENTID_SE0SH || + client_id == SOC15_IH_CLIENTID_SE1SH || + client_id == SOC15_IH_CLIENTID_SE2SH || + client_id == SOC15_IH_CLIENTID_SE3SH) { + if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) + kfd_signal_event_interrupt(pasid, context_id0, 32); + else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) { + encoding = REG_GET_FIELD(context_id1, + SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING); + switch (encoding) { + case SQ_INTERRUPT_WORD_ENCODING_AUTO: + pr_debug( + "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + WLT), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF0_FULL), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF1_FULL), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_UTC_ERROR)); + break; + case SQ_INTERRUPT_WORD_ENCODING_INST: + pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id 
%d, wgp_id %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + DATA), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SA_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + PRIV), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SIMD_ID), + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + WGP_ID)); + if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) { + if (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_DEBUG_TRAP_CODE(context_id0), + NULL, 0)) + return; + } + break; + case SQ_INTERRUPT_WORD_ENCODING_ERROR: + sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0, + ERR_TYPE); + pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + DATA), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SA_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + PRIV), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SIMD_ID), + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + WGP_ID), + sq_intr_err_type); + if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && + sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { + event_interrupt_poison_consumption(dev, pasid, source_id); + return; + } + break; + default: + break; + } + kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23); + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), + NULL, + 0); + } + } else if (client_id == SOC15_IH_CLIENTID_SDMA0 || + client_id == SOC15_IH_CLIENTID_SDMA1 || + client_id == SOC15_IH_CLIENTID_SDMA2 || + client_id == SOC15_IH_CLIENTID_SDMA3 || + (client_id == SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid && + KFD_GC_VERSION(dev) == IP_VERSION(10, 3, 0)) || + client_id == SOC15_IH_CLIENTID_SDMA4 || + client_id == SOC15_IH_CLIENTID_SDMA5 || + client_id == SOC15_IH_CLIENTID_SDMA6 || + client_id == SOC15_IH_CLIENTID_SDMA7) { + if (source_id == SOC15_INTSRC_SDMA_TRAP) { + kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); + } else if (source_id == SOC15_INTSRC_SDMA_ECC) { + event_interrupt_poison_consumption(dev, pasid, source_id); + return; + } + } else if (client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_VMC1 || + client_id == SOC15_IH_CLIENTID_UTCL2) { + struct kfd_vm_fault_info info = {0}; + uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + struct kfd_hsa_memory_exception_data exception_data; + + if (client_id == SOC15_IH_CLIENTID_UTCL2 && + amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { + event_interrupt_poison_consumption(dev, pasid, client_id); + return; + } + + info.vmid = vmid; + info.mc_id = client_id; + info.page_addr = ih_ring_entry[4] | + (uint64_t)(ih_ring_entry[5] & 0xf) << 32; + info.prot_valid = ring_id & 0x08; + info.prot_read = ring_id & 0x10; + info.prot_write = ring_id & 0x20; + + memset(&exception_data, 0, sizeof(exception_data)); + exception_data.gpu_id = dev->id; + exception_data.va = (info.page_addr) << PAGE_SHIFT; + 
exception_data.failure.NotPresent = info.prot_valid ? 1 : 0; + exception_data.failure.NoExecute = info.prot_exec ? 1 : 0; + exception_data.failure.ReadOnly = info.prot_write ? 1 : 0; + exception_data.failure.imprecise = 0; + + kfd_set_dbg_ev_from_interrupt(dev, + pasid, + -1, + KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), + &exception_data, + sizeof(exception_data)); + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); + } +} + +const struct kfd_event_interrupt_class event_interrupt_class_v10 = { + .interrupt_isr = event_interrupt_isr_v10, + .interrupt_wq = event_interrupt_wq_v10, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index c2166bf964ef..f933bd231fb9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -26,6 +26,7 @@ #include "kfd_device_queue_manager.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "kfd_smi_events.h" +#include "kfd_debug.h" /* * GFX11 SQ Interrupts @@ -238,7 +239,7 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev, client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); /* Only handle interrupts from KFD VMIDs */ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); - if (/*!KFD_IRQ_IS_FENCE(client_id, source_id) &&*/ + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && (vmid < dev->vm_info.first_vmid_kfd || vmid > dev->vm_info.last_vmid_kfd)) return false; @@ -267,7 +268,7 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev, source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || source_id == SOC21_INTSRC_SDMA_TRAP || - /* KFD_IRQ_IS_FENCE(client_id, source_id) || */ + KFD_IRQ_IS_FENCE(client_id, source_id) || (((client_id == SOC21_IH_CLIENTID_VMC) || ((client_id == SOC21_IH_CLIENTID_GFX) && (source_id == UTCL2_1_0__SRCID__FAULT))) && @@ -279,7 +280,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, { uint16_t source_id, client_id, ring_id, pasid, vmid; uint32_t context_id0, context_id1; - uint8_t sq_int_enc, sq_int_errtype; + uint8_t sq_int_enc, sq_int_priv, sq_int_errtype; struct kfd_vm_fault_info info = {0}; struct kfd_hsa_memory_exception_data exception_data; @@ -312,9 +313,9 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, exception_data.failure.ReadOnly = info.prot_write ? 
1 : 0; exception_data.failure.imprecise = 0; - /*kfd_set_dbg_ev_from_interrupt(dev, pasid, -1, + kfd_set_dbg_ev_from_interrupt(dev, pasid, -1, KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), - &exception_data, sizeof(exception_data));*/ + &exception_data, sizeof(exception_data)); kfd_smi_event_update_vmfault(dev, pasid); /* GRBM, SDMA, SE, PMM */ @@ -324,11 +325,11 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, /* CP */ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) kfd_signal_event_interrupt(pasid, context_id0, 32); - /*else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) + else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_CTXID0_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)), - NULL, 0);*/ + NULL, 0); /* SDMA */ else if (source_id == SOC21_INTSRC_SDMA_TRAP) @@ -348,6 +349,13 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, break; case SQ_INTERRUPT_WORD_ENCODING_INST: print_sq_intr_info_inst(context_id0, context_id1); + sq_int_priv = REG_GET_FIELD(context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV); + if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_CTXID0_DOORBELL_ID(context_id0), + KFD_CTXID0_TRAP_CODE(context_id0), + NULL, 0))) + return; break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: print_sq_intr_info_error(context_id0, context_id1); @@ -366,8 +374,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24); } - /*} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { - kfd_process_close_interrupt_drain(pasid);*/ + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 8cf58be80f4e..d5c9f30552e3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -23,10 +23,40 @@ #include "kfd_priv.h" #include "kfd_events.h" +#include "kfd_debug.h" #include "soc15_int.h" #include "kfd_device_queue_manager.h" #include "kfd_smi_events.h" +/* + * GFX9 SQ Interrupts + * + * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit + * packet to the Interrupt Handler: + * Auto - Generated by the SQG (various cmd overflows, timestamps etc) + * Wave - Generated by S_SENDMSG through a shader program + * Error - HW generated errors (Illegal instructions, Memviols, EDC etc) + * + * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus + * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such: + * + * - context_id0[27:26] + * Encoding type (0 = Auto, 1 = Wave, 2 = Error) + * + * - context_id0[13] + * PRIV bit indicates that Wave S_SEND or error occurred within trap + * + * - {context_id1[7:0],context_id0[31:28],context_id0[11:0]} + * 24-bit data with the following layout per encoding type: + * Auto - only context_id0[8:0] is used, which reports various interrupts + * generated by SQG. The rest is 0. + * Wave - user data sent from m0 via S_SENDMSG + * Error - Error type (context_id1[7:4]), Error Details (rest of bits) + * + * The other context_id bits show coordinates (SE/SH/CU/SIMD/WAVE) for wave + * S_SENDMSG and Errors. These are 0 for Auto. 
+ */ + enum SQ_INTERRUPT_WORD_ENCODING { SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0, SQ_INTERRUPT_WORD_ENCODING_INST, @@ -84,12 +114,32 @@ enum SQ_INTERRUPT_ERROR_TYPE { #define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000 #define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000 +/* GFX9 SQ interrupt 24-bit data from context_id<0,1> */ #define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \ ((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000)) #define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000 #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20 +/* + * The debugger will send user data(m0) with PRIV=1 to indicate it requires + * notification from the KFD with the following queue id (DOORBELL_ID) and + * trap code (TRAP_CODE). + */ +#define KFD_INT_DATA_DEBUG_DOORBELL_MASK 0x0003ff +#define KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT 10 +#define KFD_INT_DATA_DEBUG_TRAP_CODE_MASK 0x07fc00 +#define KFD_DEBUG_DOORBELL_ID(sq_int_data) ((sq_int_data) & \ + KFD_INT_DATA_DEBUG_DOORBELL_MASK) +#define KFD_DEBUG_TRAP_CODE(sq_int_data) (((sq_int_data) & \ + KFD_INT_DATA_DEBUG_TRAP_CODE_MASK) \ + >> KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT) +#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00 +#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10 +#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \ + KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \ + >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT) + static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { @@ -168,14 +218,16 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, uint16_t source_id, client_id, pasid, vmid; const uint32_t *data = ih_ring_entry; - /* Only handle interrupts from KFD VMIDs */ - vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); - if (vmid < dev->vm_info.first_vmid_kfd || - vmid > dev->vm_info.last_vmid_kfd) - return false; - source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + + /* Only handle interrupts from KFD VMIDs */ + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && + (vmid < dev->vm_info.first_vmid_kfd || + vmid > dev->vm_info.last_vmid_kfd)) + return false; + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); /* Only handle clients we care about */ @@ -194,7 +246,8 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, client_id != SOC15_IH_CLIENTID_SE0SH && client_id != SOC15_IH_CLIENTID_SE1SH && client_id != SOC15_IH_CLIENTID_SE2SH && - client_id != SOC15_IH_CLIENTID_SE3SH) + client_id != SOC15_IH_CLIENTID_SE3SH && + !KFD_IRQ_IS_FENCE(client_id, source_id)) return false; /* This is a known issue for gfx9. 
Under non HWS, pasid is not set @@ -247,6 +300,7 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, source_id == SOC15_INTSRC_SDMA_ECC || source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || + KFD_IRQ_IS_FENCE(client_id, source_id) || ((client_id == SOC15_IH_CLIENTID_VMC || client_id == SOC15_IH_CLIENTID_VMC1 || client_id == SOC15_IH_CLIENTID_UTCL2) && @@ -302,6 +356,13 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID), REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID), sq_int_data); + if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) { + if (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(sq_int_data), + KFD_DEBUG_TRAP_CODE(sq_int_data), + NULL, 0)) + return; + } break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE); @@ -324,8 +385,12 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, break; } kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24); - } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) - kfd_signal_hw_exception_event(pasid); + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), + NULL, 0); + } } else if (client_id == SOC15_IH_CLIENTID_SDMA0 || client_id == SOC15_IH_CLIENTID_SDMA1 || client_id == SOC15_IH_CLIENTID_SDMA2 || @@ -345,6 +410,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + struct kfd_hsa_memory_exception_data exception_data; if (client_id == SOC15_IH_CLIENTID_UTCL2 && amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { @@ -360,9 +426,23 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, info.prot_read = ring_id & 0x10; info.prot_write = ring_id & 0x20; + memset(&exception_data, 0, sizeof(exception_data)); + exception_data.gpu_id = dev->id; + exception_data.va = (info.page_addr) << PAGE_SHIFT; + exception_data.failure.NotPresent = info.prot_valid ? 1 : 0; + exception_data.failure.NoExecute = info.prot_exec ? 1 : 0; + exception_data.failure.ReadOnly = info.prot_write ? 
1 : 0; + exception_data.failure.imprecise = 0; + + kfd_set_dbg_ev_from_interrupt(dev, + pasid, + -1, + KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), + &exception_data, + sizeof(exception_data)); kfd_smi_event_update_vmfault(dev, pasid); - kfd_dqm_evict_pasid(dev->dqm, pasid); - kfd_signal_vm_fault_event(dev, pasid, &info, NULL); + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index a02fb939614a..cd2d56e5cdf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -963,6 +963,10 @@ struct kfd_process { uint64_t exception_enable_mask; uint64_t exception_status; + /* Used to drain stale interrupts */ + wait_queue_head_t wait_irq_drain; + bool irq_drain_is_open; + /* shared virtual memory registered by this process */ struct svm_range_list svms; @@ -1144,12 +1148,19 @@ int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); /* Interrupts */ +#define KFD_IRQ_FENCE_CLIENTID 0xff +#define KFD_IRQ_FENCE_SOURCEID 0xff +#define KFD_IRQ_IS_FENCE(client, source) \ + ((client) == KFD_IRQ_FENCE_CLIENTID && \ + (source) == KFD_IRQ_FENCE_SOURCEID) int kfd_interrupt_init(struct kfd_node *dev); void kfd_interrupt_exit(struct kfd_node *dev); bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry); bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag); +int kfd_process_drain_interrupts(struct kfd_process_device *pdd); +void kfd_process_close_interrupt_drain(unsigned int pasid); /* amdkfd Apertures */ int kfd_init_apertures(struct kfd_process *process); @@ -1421,6 +1432,7 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd); /* Events */ extern const struct kfd_event_interrupt_class event_interrupt_class_cik; extern const struct kfd_event_interrupt_class event_interrupt_class_v9; +extern const struct kfd_event_interrupt_class event_interrupt_class_v10; extern const struct kfd_event_interrupt_class event_interrupt_class_v11; extern const struct kfd_device_global_init_class device_global_init_class_cik; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 3b7f219c9d06..3d3611705d41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -862,6 +862,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) kfd_procfs_add_sysfs_stats(process); kfd_procfs_add_sysfs_files(process); kfd_procfs_add_sysfs_counters(process); + + init_waitqueue_head(&process->wait_irq_drain); } out: if (!IS_ERR(process)) @@ -2136,6 +2138,51 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) } } +/* assumes caller holds process lock. */ +int kfd_process_drain_interrupts(struct kfd_process_device *pdd) +{ + uint32_t irq_drain_fence[8]; + int r = 0; + + if (!KFD_IS_SOC15(pdd->dev)) + return 0; + + pdd->process->irq_drain_is_open = true; + + memset(irq_drain_fence, 0, sizeof(irq_drain_fence)); + irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) | + KFD_IRQ_FENCE_CLIENTID; + irq_drain_fence[3] = pdd->process->pasid; + + /* ensure stale irqs scheduled KFD interrupts and send drain fence. 
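+	 * The fence payload packs KFD_IRQ_FENCE_SOURCEID and
+	 * KFD_IRQ_FENCE_CLIENTID into dword 0 and the PASID into dword 3,
+	 * so the SOC15 interrupt handlers can recognize it with
+	 * KFD_IRQ_IS_FENCE() and wake this waiter through
+	 * kfd_process_close_interrupt_drain().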
*/ + if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev, + irq_drain_fence)) { + pdd->process->irq_drain_is_open = false; + return 0; + } + + r = wait_event_interruptible(pdd->process->wait_irq_drain, + !READ_ONCE(pdd->process->irq_drain_is_open)); + if (r) + pdd->process->irq_drain_is_open = false; + + return r; +} + +void kfd_process_close_interrupt_drain(unsigned int pasid) +{ + struct kfd_process *p; + + p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return; + + WRITE_ONCE(p->irq_drain_is_open, false); + wake_up_all(&p->wait_irq_drain); + kfd_unref_process(p); +} + struct send_exception_work_handler_workarea { struct work_struct work; struct kfd_process *p; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 43d432b5c5bc..70852a200d8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -337,6 +337,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, kq->queue->properties.queue_id = *qid; pqn->kq = kq; pqn->q = NULL; + retval = kfd_process_drain_interrupts(pdd); + if (retval) + break; + retval = dev->dqm->ops.create_kernel_queue(dev->dqm, kq, &pdd->qpd); break; From e90bf919f714ae2a658cdfd03238e7be9ce9185c Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Wed, 27 Apr 2022 10:24:37 -0400 Subject: [PATCH 766/861] drm/amdkfd: add debug set exceptions enabled operation The debugger subscribes to notification for requested exceptions on attach. Allow the debugger to change its subscription later on. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 36 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 2 ++ 3 files changed, 41 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 615fa9ab36b7..dcf4b5f3886e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2980,6 +2980,9 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v args->send_runtime_event.exception_mask); break; case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED: + kfd_dbg_set_enabled_debug_exception_mask(target, + args->set_exceptions_enabled.exception_mask); + break; case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE: case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 68b657398d41..48a4e3cc2234 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -521,3 +521,39 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, return r; } + +void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target, + uint64_t exception_set_mask) +{ + uint64_t found_mask = 0; + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + static const char write_data = '.'; + loff_t pos = 0; + int i; + + mutex_lock(&target->event_mutex); + + found_mask |= target->exception_status; + + pqm = &target->pqm; + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + if (!pqn) + continue; + + found_mask |= pqn->q->properties.exception_status; + } + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + found_mask |= pdd->exception_status; + } + + if
(exception_set_mask & found_mask) + kernel_write(target->dbg_ev_file, &write_data, 1, &pos); + + target->exception_enable_mask = exception_set_mask; + + mutex_unlock(&target->event_mutex); +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 5153ccbd7fd1..6c1054a08872 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -56,6 +56,8 @@ static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) void debug_event_write_work_handler(struct work_struct *work); +void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target, + uint64_t exception_set_mask); /* * If GFX off is enabled, chips that do not support RLC restore for the debug * registers will disable GFX off temporarily for the entire debug session. From 101827e13026a981e887527620fe9710adc0e481 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Wed, 27 Apr 2022 13:18:10 -0400 Subject: [PATCH 767/861] drm/amdkfd: add debug wave launch override operation This operation allows the debugger to override the enabled HW exceptions on the device. On debug devices that only support the debugging of a single process, the HW exceptions are global and set through the SPI_GDBG_TRAP_MASK register. Because they are global, only address watch exceptions are allowed to be enabled. In other words, the debugger must preserve all non-address watch exception states in normal mode operation by barring a full replacement override or a non-address watch override request. For multi-process debugging, all HW exception overrides are per-VMID so all exceptions can be overridden or fully replaced. In order for the debugger to know what is permissible, return the supported override mask back to the debugger along with the previously enabled overrides. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 47 ++++++++++ .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2 + .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 55 ++++++++++++ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 10 +++ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 5 +- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 87 ++++++++++++++++++- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 55 ++++++++++++ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 10 +++ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++ drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 69 +++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 6 ++ 11 files changed, 351 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index b811a0985050..d7881bbd828d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -25,6 +25,7 @@ #include "amdgpu_amdkfd_gfx_v9.h" #include "gc/gc_9_4_2_offset.h" #include "gc/gc_9_4_2_sh_mask.h" +#include /* * Returns TRAP_EN, EXCP_EN and EXCP_REPLACE.
@@ -62,6 +63,50 @@ static uint32_t kgd_aldebaran_disable_debug_trap(struct amdgpu_device *adev, return data; } +static int kgd_aldebaran_validate_trap_override_request(struct amdgpu_device *adev, + uint32_t trap_override, + uint32_t *trap_mask_supported) +{ + *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID | + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL | + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_FP_OVERFLOW | + KFD_DBG_TRAP_MASK_FP_UNDERFLOW | + KFD_DBG_TRAP_MASK_FP_INEXACT | + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH | + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION; + + if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR && + trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE) + return -EPERM; + + return 0; +} + +/* returns TRAP_EN, EXCP_EN and EXCP_RPLACE. */ +static uint32_t kgd_aldebaran_set_wave_launch_trap_override(struct amdgpu_device *adev, + uint32_t vmid, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t kfd_dbg_trap_cntl_prev) + +{ + uint32_t data = 0; + + *trap_mask_prev = REG_GET_FIELD(kfd_dbg_trap_cntl_prev, SPI_GDBG_PER_VMID_CNTL, EXCP_EN); + trap_mask_bits = (trap_mask_bits & trap_mask_request) | + (*trap_mask_prev & ~trap_mask_request); + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, trap_mask_bits); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override); + + return data; +} + const struct kfd2kgd_calls aldebaran_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, @@ -82,6 +127,8 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .enable_debug_trap = kgd_aldebaran_enable_debug_trap, .disable_debug_trap = kgd_aldebaran_disable_debug_trap, + .validate_trap_override_request = kgd_aldebaran_validate_trap_override_request, + .set_wave_launch_trap_override = kgd_aldebaran_set_wave_launch_trap_override, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index a62bd0068515..ec2587664001 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -410,6 +410,8 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { kgd_gfx_v9_set_vm_context_page_table_base, .enable_debug_trap = kgd_arcturus_enable_debug_trap, .disable_debug_trap = kgd_arcturus_disable_debug_trap, + .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request, + .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 98006c7021dd..7ea0362dcab3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -32,6 +32,7 @@ #include "v10_structs.h" #include "nv.h" #include "nvd.h" +#include enum hqd_dequeue_request_type { NO_ACTION = 0, @@ -803,6 +804,58 @@ 
uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, return 0; } +int kgd_gfx_v10_validate_trap_override_request(struct amdgpu_device *adev, + uint32_t trap_override, + uint32_t *trap_mask_supported) +{ + *trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH; + + /* The SPI_GDBG_TRAP_MASK register is global and affects all + * processes. Only allow OR-ing the address-watch bit, since + * this only affects processes under the debugger. Other bits + * should stay 0 to avoid the debugger interfering with other + * processes. + */ + if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR) + return -EINVAL; + + return 0; +} + +uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev, + uint32_t vmid, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t kfd_dbg_trap_cntl_prev) +{ + uint32_t data, wave_cntl_prev; + + mutex_lock(&adev->grbm_idx_mutex); + + wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); + + kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true); + + data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK)); + *trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN); + + trap_mask_bits = (trap_mask_bits & trap_mask_request) | + (*trap_mask_prev & ~trap_mask_request); + + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits); + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data); + + /* We need to preserve wave launch mode stall settings. */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + /* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values * The values read are: * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. 
@@ -889,6 +942,8 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .set_vm_context_page_table_base = set_vm_context_page_table_base, .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, + .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, + .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 1e993a213646..57339fa12807 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -26,6 +26,16 @@ uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev, uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid); +int kgd_gfx_v10_validate_trap_override_request(struct amdgpu_device *adev, + uint32_t trap_override, + uint32_t *trap_mask_supported); +uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev, + uint32_t vmid, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t kfd_dbg_trap_cntl_prev); void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 387bdf4823c9..7120927fed15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -675,5 +675,8 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, - .disable_debug_trap = kgd_gfx_v10_disable_debug_trap + .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, + .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, + .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override + }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index cc954cf248ca..ae0c4707919f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -30,6 +30,7 @@ #include "soc15d.h" #include "v11_structs.h" #include "soc21.h" +#include enum hqd_dequeue_request_type { NO_ACTION = 0, @@ -643,6 +644,88 @@ static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev, return data; } +static int kgd_gfx_v11_validate_trap_override_request(struct amdgpu_device *adev, + uint32_t trap_override, + uint32_t *trap_mask_supported) +{ + *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID | + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL | + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_FP_OVERFLOW | + KFD_DBG_TRAP_MASK_FP_UNDERFLOW | + KFD_DBG_TRAP_MASK_FP_INEXACT | + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH | + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION; + + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 4)) + *trap_mask_supported |= 
KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START | + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END; + + if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR && + trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE) + return -EPERM; + + return 0; +} + +static uint32_t trap_mask_map_sw_to_hw(uint32_t mask) +{ + uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0; + uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0; + uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID | + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL | + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_FP_OVERFLOW | + KFD_DBG_TRAP_MASK_FP_UNDERFLOW | + KFD_DBG_TRAP_MASK_FP_INEXACT | + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH | + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION); + uint32_t ret; + + ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en); + ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start); + ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end); + + return ret; +} + +static uint32_t trap_mask_map_hw_to_sw(uint32_t mask) +{ + uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN); + + if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START)) + ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START; + + if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END)) + ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END; + + return ret; +} + +/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */ +static uint32_t kgd_gfx_v11_set_wave_launch_trap_override(struct amdgpu_device *adev, + uint32_t vmid, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t kfd_dbg_trap_cntl_prev) +{ + uint32_t data = 0; + + *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev); + + data = (trap_mask_bits & trap_mask_request) | (*trap_mask_prev & ~trap_mask_request); + data = trap_mask_map_sw_to_hw(data); + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override); + + return data; +} + const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .program_sh_mem_settings = program_sh_mem_settings_v11, .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11, @@ -660,5 +743,7 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = NULL, .set_vm_context_page_table_base = set_vm_context_page_table_base_v11, .enable_debug_trap = kgd_gfx_v11_enable_debug_trap, - .disable_debug_trap = kgd_gfx_v11_disable_debug_trap + .disable_debug_trap = kgd_gfx_v11_disable_debug_trap, + .validate_trap_override_request = kgd_gfx_v11_validate_trap_override_request, + .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 829ee720cc44..ce5c256f0517 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -38,6 +38,7 @@ #include "soc15d.h" #include "gfx_v9_0.h" #include "amdgpu_amdkfd_gfx_v9.h" +#include enum hqd_dequeue_request_type { NO_ACTION = 0, @@ -739,6 +740,58 @@ uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, return 0; } +int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev, + uint32_t trap_override, + uint32_t *trap_mask_supported) +{ + *trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH; + + /* The SPI_GDBG_TRAP_MASK register 
is global and affects all + * processes. Only allow OR-ing the address-watch bit, since + * this only affects processes under the debugger. Other bits + * should stay 0 to avoid the debugger interfering with other + * processes. + */ + if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR) + return -EINVAL; + + return 0; +} + +uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev, + uint32_t vmid, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t kfd_dbg_cntl_prev) +{ + uint32_t data, wave_cntl_prev; + + mutex_lock(&adev->grbm_idx_mutex); + + wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true); + + data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK)); + *trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN); + + trap_mask_bits = (trap_mask_bits & trap_mask_request) | + (*trap_mask_prev & ~trap_mask_request); + + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits); + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data); + + /* We need to preserve wave launch mode stall settings. */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + /* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values * The values read are: * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. @@ -1010,6 +1063,8 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .enable_debug_trap = kgd_gfx_v9_enable_debug_trap, .disable_debug_trap = kgd_gfx_v9_disable_debug_trap, + .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request, + .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index fed5b7f18b1a..76812ddd35b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -71,6 +71,16 @@ uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev, uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid); +int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev, + uint32_t trap_override, + uint32_t *trap_mask_supported); +uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev, + uint32_t vmid, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t kfd_dbg_trap_cntl_prev); void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index dcf4b5f3886e..7fa249807671 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2984,6 +2984,13 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v 
args->set_exceptions_enabled.exception_mask); break; case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: + r = kfd_dbg_trap_set_wave_launch_override(target, + args->launch_override.override_mode, + args->launch_override.enable_mask, + args->launch_override.support_request_mask, + &args->launch_override.enable_mask, + &args->launch_override.support_request_mask); + break; case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE: case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: case KFD_IOC_DBG_TRAP_RESUME_QUEUES: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 48a4e3cc2234..733390fb2459 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -522,6 +522,75 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, return r; } +static int kfd_dbg_validate_trap_override_request(struct kfd_process *p, + uint32_t trap_override, + uint32_t trap_mask_request, + uint32_t *trap_mask_supported) +{ + int i = 0; + + *trap_mask_supported = 0xffffffff; + + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + int err = pdd->dev->kfd2kgd->validate_trap_override_request( + pdd->dev->adev, + trap_override, + trap_mask_supported); + + if (err) + return err; + } + + if (trap_mask_request & ~*trap_mask_supported) + return -EACCES; + + return 0; +} + +int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t *trap_mask_supported) +{ + int r = 0, i; + + r = kfd_dbg_validate_trap_override_request(target, + trap_override, + trap_mask_request, + trap_mask_supported); + + if (r) + return r; + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + amdgpu_gfx_off_ctrl(pdd->dev->adev, false); + pdd->spi_dbg_override = pdd->dev->kfd2kgd->set_wave_launch_trap_override( + pdd->dev->adev, + pdd->dev->vm_info.last_vmid_kfd, + trap_override, + trap_mask_bits, + trap_mask_request, + trap_mask_prev, + pdd->spi_dbg_override); + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + r = debug_refresh_runlist(pdd->dev->dqm); + else + r = kfd_dbg_set_mes_debug_mode(pdd); + + if (r) + break; + } + + return r; +} + void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target, uint64_t exception_set_mask) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 6c1054a08872..c9245221aa76 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -42,6 +42,12 @@ int kfd_dbg_trap_disable(struct kfd_process *target); int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, void __user *runtime_info, uint32_t *runtime_info_size); +int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t *trap_mask_supported); int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, From aea1b4738bebd8092bd437ce0b03aa9587fc20a7 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 2 May 2022 11:45:05 -0400 Subject: [PATCH 768/861] drm/amdkfd: add debug wave launch mode operation Allow the debugger to set wave behaviour to either operate normally, halt at launch, trap on every instruction, terminate immediately or stall on allocation.
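For illustration only, a debugger could drive this operation from user space roughly as follows. The AMDKFD_IOC_DBG_TRAP request number and the struct kfd_ioctl_dbg_trap_args layout are assumed to match the uAPI introduced earlier in this series; the sketch below is not part of the patch.

/*
 * Hypothetical userspace sketch: halt all waves of the target process
 * at launch. Assumes the AMDKFD_IOC_DBG_TRAP ioctl and the
 * kfd_ioctl_dbg_trap_args uAPI from this series; error handling is
 * reduced to the bare minimum.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int halt_waves_on_launch(int kfd_fd, int target_pid)
{
        struct kfd_ioctl_dbg_trap_args args;

        memset(&args, 0, sizeof(args));
        args.pid = target_pid;
        args.op = KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE;
        args.launch_mode.launch_mode = KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT;

        /* 0 on success; the KFD applies the mode on every device the
         * debugged process has a context on
         */
        return ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
}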
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 12 +++++++ .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 1 + .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 25 +++++++++++++ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 3 ++ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 3 +- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 14 +++++++- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 25 +++++++++++++ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 36 ++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 2 ++ 11 files changed, 124 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index d7881bbd828d..774ecfc3451a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -107,6 +107,17 @@ static uint32_t kgd_aldebaran_set_wave_launch_trap_override(struct amdgpu_device return data; } +static uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, + uint8_t wave_launch_mode, + uint32_t vmid) +{ + uint32_t data = 0; + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE, wave_launch_mode); + + return data; +} + const struct kfd2kgd_calls aldebaran_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, @@ -129,6 +140,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .disable_debug_trap = kgd_aldebaran_disable_debug_trap, .validate_trap_override_request = kgd_aldebaran_validate_trap_override_request, .set_wave_launch_trap_override = kgd_aldebaran_set_wave_launch_trap_override, + .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index ec2587664001..fbdc1b7b1e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -412,6 +412,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .disable_debug_trap = kgd_arcturus_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override, + .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 7ea0362dcab3..a7a6edda557f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -856,6 +856,30 @@ uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev, return 0; } +uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev, + uint8_t wave_launch_mode, + uint32_t vmid) +{ + uint32_t data = 0; + bool is_mode_set = !!wave_launch_mode; + + mutex_lock(&adev->grbm_idx_mutex); + + kgd_gfx_v10_set_wave_launch_stall(adev, 
vmid, true); + + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, + VMID_MASK, is_mode_set ? 1 << vmid : 0); + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, + MODE, is_mode_set ? wave_launch_mode : 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data); + + kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + /* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values * The values read are: * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. @@ -944,6 +968,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, + .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode, .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 57339fa12807..3a6aca2b0eaa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -36,6 +36,9 @@ uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev, uint32_t trap_mask_request, uint32_t *trap_mask_prev, uint32_t kfd_dbg_trap_cntl_prev); +uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev, + uint8_t wave_launch_mode, + uint32_t vmid); void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 7120927fed15..ed36b433a48b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -677,6 +677,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, - .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override + .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, + .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index ae0c4707919f..9711d5128d09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -726,6 +726,17 @@ static uint32_t kgd_gfx_v11_set_wave_launch_trap_override(struct amdgpu_device * return data; } +static uint32_t kgd_gfx_v11_set_wave_launch_mode(struct amdgpu_device *adev, + uint8_t wave_launch_mode, + uint32_t vmid) +{ + uint32_t data = 0; + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE, wave_launch_mode); + + return data; +} + const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .program_sh_mem_settings = program_sh_mem_settings_v11, .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11, @@ -745,5 +756,6 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .enable_debug_trap = kgd_gfx_v11_enable_debug_trap, .disable_debug_trap = 
kgd_gfx_v11_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v11_validate_trap_override_request, - .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override + .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override, + .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index ce5c256f0517..0acc0c18dfe6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -792,6 +792,30 @@ uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev, return 0; } +uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev, + uint8_t wave_launch_mode, + uint32_t vmid) +{ + uint32_t data = 0; + bool is_mode_set = !!wave_launch_mode; + + mutex_lock(&adev->grbm_idx_mutex); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true); + + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, + VMID_MASK, is_mode_set ? 1 << vmid : 0); + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, + MODE, is_mode_set ? wave_launch_mode : 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data); + + kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + /* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values * The values read are: * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. @@ -1065,6 +1089,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .disable_debug_trap = kgd_gfx_v9_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override, + .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 76812ddd35b1..18f4970ac8e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -74,6 +74,9 @@ uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev, uint32_t trap_override, uint32_t *trap_mask_supported); +uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev, + uint8_t wave_launch_mode, + uint32_t vmid); uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev, uint32_t vmid, uint32_t trap_override, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 7fa249807671..a6570b124b2b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2992,6 +2992,9 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v &args->launch_override.support_request_mask); break; case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE: + r = kfd_dbg_trap_set_wave_launch_mode(target, + args->launch_mode.launch_mode); + break; case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: case KFD_IOC_DBG_TRAP_RESUME_QUEUES: case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 733390fb2459..53c3418562d4 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -301,8 +301,10 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind { int i; - if (!unwind) + if (!unwind) { cancel_work_sync(&target->debug_event_workarea); + kfd_dbg_trap_set_wave_launch_mode(target, 0); + } for (i = 0; i < target->n_pdds; i++) { struct kfd_process_device *pdd = target->pdds[i]; @@ -591,6 +593,38 @@ int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target, return r; } +int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target, + uint8_t wave_launch_mode) +{ + int r = 0, i; + + if (wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL && + wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT && + wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG) + return -EINVAL; + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + amdgpu_gfx_off_ctrl(pdd->dev->adev, false); + pdd->spi_dbg_launch_mode = pdd->dev->kfd2kgd->set_wave_launch_mode( + pdd->dev->adev, + wave_launch_mode, + pdd->dev->vm_info.last_vmid_kfd); + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + r = debug_refresh_runlist(pdd->dev->dqm); + else + r = kfd_dbg_set_mes_debug_mode(pdd); + + if (r) + break; + } + + return r; +} + void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target, uint64_t exception_set_mask) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index c9245221aa76..cb17869437c5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -48,6 +48,8 @@ int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target, uint32_t trap_mask_request, uint32_t *trap_mask_prev, uint32_t *trap_mask_supported); +int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target, + uint8_t wave_launch_mode); int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, From a70a93fa568b4f05aba548dadb673703eccf5480 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 5 May 2022 16:15:37 -0400 Subject: [PATCH 769/861] drm/amdkfd: add debug suspend and resume process queues operation In order to inspect waves from the saved context at any point during a debug session, the debugger must be able to preempt queues to trigger context save by suspending them. On queue suspend, the KFD will copy the context save header information so that the debugger can correctly crawl the appropriate size of the saved context. The debugger must then also be allowed to resume suspended queues. A queue that is newly created cannot be suspended because queue ids are recycled after destruction, so the debugger needs to know when this has occurred. Query functions will be added later to clear a given queue of its new-queue status. A queue cannot be destroyed while it is suspended, to preserve its saved context during debugger inspection. Have queue destruction block while a queue is suspended and unblock when it is resumed. Likewise, a queue that is about to be destroyed cannot be suspended. Return the number of queues successfully suspended or resumed along with a per-queue status array. The upper bits of each queue's status entry show whether the request was invalid (suspend request on a new or destroyed queue, missing queue) or an error occurred (HWS in a fatal state, so it cannot suspend or resume queues).
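For illustration only, and again assuming the AMDKFD_IOC_DBG_TRAP uAPI from this series (including the KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK status bits), a debugger could suspend a batch of queues and inspect the per-queue status described above roughly as follows; this sketch is not part of the patch.

/*
 * Hypothetical userspace sketch: suspend two queues of the target and
 * check the status bits the KFD writes back into the queue id array.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int suspend_queues_example(int kfd_fd, int target_pid,
                                  uint32_t qid0, uint32_t qid1)
{
        uint32_t queue_ids[2] = { qid0, qid1 };
        struct kfd_ioctl_dbg_trap_args args;
        int suspended, i;

        memset(&args, 0, sizeof(args));
        args.pid = target_pid;
        args.op = KFD_IOC_DBG_TRAP_SUSPEND_QUEUES;
        args.suspend_queues.num_queues = 2;
        args.suspend_queues.grace_period = 0;
        args.suspend_queues.exception_mask = 0;
        args.suspend_queues.queue_array_ptr = (uint64_t)(uintptr_t)queue_ids;

        /* the return value is the number of queues actually suspended */
        suspended = ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
        if (suspended < 0)
                return suspended;

        /* queues that could not be suspended keep an error/invalid bit */
        for (i = 0; i < 2; i++)
                if (queue_ids[i] & (KFD_DBG_QUEUE_ERROR_MASK |
                                    KFD_DBG_QUEUE_INVALID_MASK))
                        fprintf(stderr, "queue entry %d not suspended\n", i);

        return suspended;
}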
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 11 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 7 + .../drm/amd/amdkfd/kfd_device_queue_manager.c | 447 +++++++++++++++++- .../drm/amd/amdkfd/kfd_device_queue_manager.h | 10 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 10 + .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 15 +- .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +- .../amd/amdkfd/kfd_process_queue_manager.c | 1 + 11 files changed, 512 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 98cd52bb005f..b4fcad0e62f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -772,6 +772,11 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) return adev->have_atomics_support; } +void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev) +{ + amdgpu_device_flush_hdp(adev, NULL); +} + void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset) { amdgpu_umc_poison_handler(adev, reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index dd740e64e6e1..2d0406bff84e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -322,6 +322,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, uint64_t *mmap_offset); int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, struct dma_buf **dmabuf); +void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a6570b124b2b..1fae97df7a1e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -410,6 +410,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, pr_debug("Write ptr address == 0x%016llX\n", args->write_pointer_address); + kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0); return 0; err_create_queue: @@ -2996,7 +2997,17 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v args->launch_mode.launch_mode); break; case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: + r = suspend_queues(target, + args->suspend_queues.num_queues, + args->suspend_queues.grace_period, + args->suspend_queues.exception_mask, + (uint32_t *)args->suspend_queues.queue_array_ptr); + + break; case KFD_IOC_DBG_TRAP_RESUME_QUEUES: + r = resume_queues(target, args->resume_queues.num_queues, + (uint32_t *)args->resume_queues.queue_array_ptr); + break; case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: case KFD_IOC_DBG_TRAP_SET_FLAGS: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 53c3418562d4..f4d3dfb35cb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -339,6 +339,13 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind } kfd_dbg_set_workaround(target, false); + + if (!unwind) { + 
int resume_count = resume_queues(target, 0, NULL); + + if (resume_count) + pr_debug("Resumed %d queues\n", resume_count); + } } static void kfd_dbg_clean_exception_status(struct kfd_process *target) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 44d87943e40a..bc9e81293165 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -952,6 +952,92 @@ out_unlock: return retval; } +/* suspend_single_queue does not lock the dqm like the + * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should + * lock the dqm before calling, and unlock after calling. + * + * The reason we don't lock the dqm is because this function may be + * called on multiple queues in a loop, so rather than locking/unlocking + * multiple times, we will just keep the dqm locked for all of the calls. + */ +static int suspend_single_queue(struct device_queue_manager *dqm, + struct kfd_process_device *pdd, + struct queue *q) +{ + bool is_new; + + if (q->properties.is_suspended) + return 0; + + pr_debug("Suspending PASID %u queue [%i]\n", + pdd->process->pasid, + q->properties.queue_id); + + is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW); + + if (is_new || q->properties.is_being_destroyed) { + pr_debug("Suspend: skip %s queue id %i\n", + is_new ? "new" : "destroyed", + q->properties.queue_id); + return -EBUSY; + } + + q->properties.is_suspended = true; + if (q->properties.is_active) { + if (dqm->dev->kfd->shared_resources.enable_mes) { + int r = remove_queue_mes(dqm, q, &pdd->qpd); + + if (r) + return r; + } + + decrement_queue_count(dqm, &pdd->qpd, q); + q->properties.is_active = false; + } + + return 0; +} + +/* resume_single_queue does not lock the dqm like the functions + * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should + * lock the dqm before calling, and unlock after calling. + * + * The reason we don't lock the dqm is because this function may be + * called on multiple queues in a loop, so rather than locking/unlocking + * multiple times, we will just keep the dqm locked for all of the calls. 
+ */ +static int resume_single_queue(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q) +{ + struct kfd_process_device *pdd; + + if (!q->properties.is_suspended) + return 0; + + pdd = qpd_to_pdd(qpd); + + pr_debug("Restoring from suspend PASID %u queue [%i]\n", + pdd->process->pasid, + q->properties.queue_id); + + q->properties.is_suspended = false; + + if (QUEUE_IS_ACTIVE(q->properties)) { + if (dqm->dev->kfd->shared_resources.enable_mes) { + int r = add_queue_mes(dqm, q, &pdd->qpd); + + if (r) + return r; + } + + q->properties.is_active = true; + increment_queue_count(dqm, qpd, q); + } + + return 0; +} + static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -1926,6 +2012,31 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, return map_queues_cpsch(dqm); } +static int wait_on_destroy_queue(struct device_queue_manager *dqm, + struct queue *q) +{ + struct kfd_process_device *pdd = kfd_get_process_device_data(q->device, + q->process); + int ret = 0; + + if (pdd->qpd.is_debug) + return ret; + + q->properties.is_being_destroyed = true; + + if (pdd->process->debug_trap_enabled && q->properties.is_suspended) { + dqm_unlock(dqm); + mutex_unlock(&q->process->mutex); + ret = wait_event_interruptible(dqm->destroy_wait, + !q->properties.is_suspended); + + mutex_lock(&q->process->mutex); + dqm_lock(dqm); + } + + return ret; +} + static int destroy_queue_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) @@ -1945,11 +2056,16 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, q->properties.queue_id); } - retval = 0; - /* remove queue from list to prevent rescheduling after preemption */ dqm_lock(dqm); + retval = wait_on_destroy_queue(dqm, q); + + if (retval) { + dqm_unlock(dqm); + return retval; + } + if (qpd->is_debug) { /* * error, currently we do not allow to destroy a queue @@ -1996,7 +2112,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, dqm_unlock(dqm); - /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */ + /* + * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid + * circular locking + */ + kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE), + qpd->pqm->process, q->device, + -1, false, NULL, 0); + mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); return retval; @@ -2461,8 +2584,10 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) goto out_free; } - if (!dqm->ops.initialize(dqm)) + if (!dqm->ops.initialize(dqm)) { + init_waitqueue_head(&dqm->destroy_wait); return dqm; + } out_free: kfree(dqm); @@ -2602,6 +2727,320 @@ out_unlock: return r; } +#define QUEUE_NOT_FOUND -1 +/* invalidate queue operation in array */ +static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids) +{ + int i; + + for (i = 0; i < num_queues; i++) + queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK; +} + +/* find queue index in array */ +static int q_array_get_index(unsigned int queue_id, + uint32_t num_queues, + uint32_t *queue_ids) +{ + int i; + + for (i = 0; i < num_queues; i++) + if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK)) + return i; + + return QUEUE_NOT_FOUND; +} + +struct copy_context_work_handler_workarea { + struct work_struct copy_context_work; + struct kfd_process *p; +}; + +static void copy_context_work_handler (struct work_struct *work) +{ + struct copy_context_work_handler_workarea *workarea; + struct mqd_manager *mqd_mgr; + struct queue *q; 
+ struct mm_struct *mm; + struct kfd_process *p; + uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size; + int i; + + workarea = container_of(work, + struct copy_context_work_handler_workarea, + copy_context_work); + + p = workarea->p; + mm = get_task_mm(p->lead_thread); + + if (!mm) + return; + + kthread_use_mm(mm); + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + + list_for_each_entry(q, &qpd->queues_list, list) { + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; + + /* We ignore the return value from get_wave_state + * because + * i) right now, it always returns 0, and + * ii) if we hit an error, we would continue to the + * next queue anyway. + */ + mqd_mgr->get_wave_state(mqd_mgr, + q->mqd, + &q->properties, + (void __user *) q->properties.ctx_save_restore_area_address, + &tmp_ctl_stack_used_size, + &tmp_save_area_used_size); + } + } + kthread_unuse_mm(mm); + mmput(mm); +} + +static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array) +{ + size_t array_size = num_queues * sizeof(uint32_t); + uint32_t *queue_ids = NULL; + + if (!usr_queue_id_array) + return NULL; + + queue_ids = kzalloc(array_size, GFP_KERNEL); + if (!queue_ids) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(queue_ids, usr_queue_id_array, array_size)) + return ERR_PTR(-EFAULT); + + return queue_ids; +} + +int resume_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t *usr_queue_id_array) +{ + uint32_t *queue_ids = NULL; + int total_resumed = 0; + int i; + + if (usr_queue_id_array) { + queue_ids = get_queue_ids(num_queues, usr_queue_id_array); + + if (IS_ERR(queue_ids)) + return PTR_ERR(queue_ids); + + /* mask all queues as invalid. 
unmask per successful request */ + q_array_invalidate(num_queues, queue_ids); + } + + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + struct queue *q; + int r, per_device_resumed = 0; + + dqm_lock(dqm); + + /* unmask queues that resume or already resumed as valid */ + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = QUEUE_NOT_FOUND; + + if (queue_ids) + q_idx = q_array_get_index( + q->properties.queue_id, + num_queues, + queue_ids); + + if (!queue_ids || q_idx != QUEUE_NOT_FOUND) { + int err = resume_single_queue(dqm, &pdd->qpd, q); + + if (queue_ids) { + if (!err) { + queue_ids[q_idx] &= + ~KFD_DBG_QUEUE_INVALID_MASK; + } else { + queue_ids[q_idx] |= + KFD_DBG_QUEUE_ERROR_MASK; + break; + } + } + + if (dqm->dev->kfd->shared_resources.enable_mes) { + wake_up_all(&dqm->destroy_wait); + if (!err) + total_resumed++; + } else { + per_device_resumed++; + } + } + } + + if (!per_device_resumed) { + dqm_unlock(dqm); + continue; + } + + r = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, + 0, + USE_DEFAULT_GRACE_PERIOD); + if (r) { + pr_err("Failed to resume process queues\n"); + if (queue_ids) { + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index( + q->properties.queue_id, + num_queues, + queue_ids); + + /* mask queue as error on resume fail */ + if (q_idx != QUEUE_NOT_FOUND) + queue_ids[q_idx] |= + KFD_DBG_QUEUE_ERROR_MASK; + } + } + } else { + wake_up_all(&dqm->destroy_wait); + total_resumed += per_device_resumed; + } + + dqm_unlock(dqm); + } + + if (queue_ids) { + if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, + num_queues * sizeof(uint32_t))) + pr_err("copy_to_user failed on queue resume\n"); + + kfree(queue_ids); + } + + return total_resumed; +} + +int suspend_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t grace_period, + uint64_t exception_clear_mask, + uint32_t *usr_queue_id_array) +{ + uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array); + int total_suspended = 0; + int i; + + if (IS_ERR(queue_ids)) + return PTR_ERR(queue_ids); + + /* mask all queues as invalid. 
unmask on successful request */ + q_array_invalidate(num_queues, queue_ids); + + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + struct queue *q; + int r, per_device_suspended = 0; + + mutex_lock(&p->event_mutex); + dqm_lock(dqm); + + /* unmask queues that suspend or already suspended */ + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index(q->properties.queue_id, + num_queues, + queue_ids); + + if (q_idx != QUEUE_NOT_FOUND) { + int err = suspend_single_queue(dqm, pdd, q); + bool is_mes = dqm->dev->kfd->shared_resources.enable_mes; + + if (!err) { + queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK; + if (exception_clear_mask && is_mes) + q->properties.exception_status &= + ~exception_clear_mask; + + if (is_mes) + total_suspended++; + else + per_device_suspended++; + } else if (err != -EBUSY) { + r = err; + queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; + break; + } + } + } + + if (!per_device_suspended) { + dqm_unlock(dqm); + mutex_unlock(&p->event_mutex); + if (total_suspended) + amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev); + continue; + } + + r = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + grace_period); + + if (r) + pr_err("Failed to suspend process queues.\n"); + else + total_suspended += per_device_suspended; + + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index(q->properties.queue_id, + num_queues, queue_ids); + + if (q_idx == QUEUE_NOT_FOUND) + continue; + + /* mask queue as error on suspend fail */ + if (r) + queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; + else if (exception_clear_mask) + q->properties.exception_status &= + ~exception_clear_mask; + } + + dqm_unlock(dqm); + mutex_unlock(&p->event_mutex); + amdgpu_device_flush_hdp(dqm->dev->adev, NULL); + } + + if (total_suspended) { + struct copy_context_work_handler_workarea copy_context_worker; + + INIT_WORK_ONSTACK( + &copy_context_worker.copy_context_work, + copy_context_work_handler); + + copy_context_worker.p = p; + + schedule_work(&copy_context_worker.copy_context_work); + + + flush_work(&copy_context_worker.copy_context_work); + destroy_work_on_stack(&copy_context_worker.copy_context_work); + } + + if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, + num_queues * sizeof(uint32_t))) + pr_err("copy_to_user failed on queue suspend\n"); + + kfree(queue_ids); + + return total_suspended; +} + int debug_lock_and_unmap(struct device_queue_manager *dqm) { int r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index bb75d93712eb..d4e6dbffe8c2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -263,6 +263,8 @@ struct device_queue_manager { uint32_t current_logical_xcc_start; uint32_t wait_times; + + wait_queue_head_t destroy_wait; }; void device_queue_manager_init_cik( @@ -290,6 +292,14 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); int release_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); +int suspend_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t grace_period, + uint64_t exception_clear_mask, + uint32_t *usr_queue_id_array); +int resume_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t *usr_queue_id_array); int debug_lock_and_unmap(struct
device_queue_manager *dqm); int debug_map_and_unlock(struct device_queue_manager *dqm); int debug_refresh_runlist(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index a0ac4f2fe6b5..94c0fc2e57b7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -237,6 +237,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v10_compute_mqd *m; + struct kfd_context_save_area_header header; m = get_mqd(mqd); @@ -255,6 +256,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, * accessible to user mode */ + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; + + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; + + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) + return -EFAULT; + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 9a9b4e853516..31fec5e70d13 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -291,7 +291,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v11_compute_mqd *m; - /*struct mqd_user_context_save_area_header header;*/ + struct kfd_context_save_area_header header; m = get_mqd(mqd); @@ -309,16 +309,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, * it's part of the context save area that is already * accessible to user mode */ -/* - header.control_stack_size = *ctl_stack_used_size; - header.wave_state_size = *save_area_used_size; + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; - header.wave_state_offset = m->cp_hqd_wg_state_offset; - header.control_stack_offset = m->cp_hqd_cntl_stack_offset; + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; - if (copy_to_user(ctl_stack, &header, sizeof(header))) + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) return -EFAULT; -*/ + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 5b87c244e909..601bb9f68048 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -311,6 +311,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v9_mqd *m; + struct kfd_context_save_area_header header; /* Control stack is located one page after MQD. 
*/ void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); @@ -322,7 +323,18 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, *save_area_used_size = m->cp_hqd_wg_state_offset - m->cp_hqd_cntl_stack_size; - if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size)) + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; + + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; + + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) + return -EFAULT; + + if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset, + mqd_ctl_stack + m->cp_hqd_cntl_stack_offset, + *ctl_stack_used_size)) return -EFAULT; return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index cd2d56e5cdf0..05da43bf233a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -510,6 +510,8 @@ struct queue_properties { uint32_t doorbell_off; bool is_interop; bool is_evicted; + bool is_suspended; + bool is_being_destroyed; bool is_active; bool is_gws; uint32_t pm4_target_xcc; @@ -535,7 +537,8 @@ struct queue_properties { #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \ (q).queue_address != 0 && \ (q).queue_percent > 0 && \ - !(q).is_evicted) + !(q).is_evicted && \ + !(q).is_suspended) enum mqd_update_flag { UPDATE_FLAG_DBG_WA_ENABLE = 1, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 70852a200d8f..01ccab607a69 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -187,6 +187,7 @@ static int init_user_queue(struct process_queue_manager *pqm, /* Doorbell initialized in user space*/ q_properties->doorbell_ptr = NULL; + q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW); /* let DQM handle it*/ q_properties->vmid = 0; From e0f85f4690d089cc1a60337decafb1acf7eec45e Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 6 May 2022 14:58:55 -0400 Subject: [PATCH 770/861] drm/amdkfd: add debug set and clear address watch points operation Shader read, write and atomic memory operations can be alerted to the debugger as an address watch exception. Allow the debugger to pass in a watch point to a particular memory address per device. Note that only four watch points exist per device to date, so have the KFD keep track of which watch points are allocated and which are free.
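As an illustration for readers of this series (not part of the patch itself): the bookkeeping described above amounts to a four-slot bitmask allocator shared between the process and the device. A minimal user-space sketch of the idea, with pthread locking standing in for the KFD's spinlock and all names hypothetical:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_WATCH_ADDRESSES 4

static uint32_t alloc_watch_ids;	/* bit i set => watch point i in use */
static pthread_mutex_t watch_lock = PTHREAD_MUTEX_INITIALIZER;

/* Claim the first free watch point; fail once all four are taken. */
static int get_watch_id(int *watch_id)
{
	int i, r = -ENOMEM;

	pthread_mutex_lock(&watch_lock);
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
		if (alloc_watch_ids & (1u << i))
			continue;	/* slot in use, skip it */
		alloc_watch_ids |= 1u << i;
		*watch_id = i;
		r = 0;
		break;
	}
	pthread_mutex_unlock(&watch_lock);
	return r;
}

/* Release a previously claimed watch point. */
static void put_watch_id(int watch_id)
{
	pthread_mutex_lock(&watch_lock);
	alloc_watch_ids &= ~(1u << watch_id);
	pthread_mutex_unlock(&watch_lock);
}

int main(void)
{
	int id, i;

	for (i = 0; i < 5; i++)	/* the fifth request must fail */
		printf("request %d -> %d\n", i, get_watch_id(&id) ? -1 : id);
	put_watch_id(2);	/* free slot 2 ... */
	printf("after free -> %d\n", get_watch_id(&id) ? -1 : id);	/* ... and reclaim it */
	return 0;
}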
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 51 +++++++ .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2 + .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 78 ++++++++++ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 8 ++ .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 5 +- .../drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 52 ++++++- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 77 ++++++++++ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 8 ++ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 24 ++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 136 ++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 + drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 6 +- 13 files changed, 452 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index 774ecfc3451a..efd6a72aab4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -118,6 +118,55 @@ static uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, return data; } +#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H) +static uint32_t kgd_gfx_aldebaran_set_address_watch( + struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 6); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + return watch_address_cntl; +} + +uint32_t kgd_gfx_aldebaran_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + return 0; +} + const struct kfd2kgd_calls aldebaran_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, @@ -141,6 +190,8 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .validate_trap_override_request = kgd_aldebaran_validate_trap_override_request, .set_wave_launch_trap_override = kgd_aldebaran_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode, + .set_address_watch = kgd_gfx_aldebaran_set_address_watch, + .clear_address_watch = kgd_gfx_aldebaran_clear_address_watch, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index fbdc1b7b1e42..6df215aba4c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -413,6 +413,8 @@ const struct kfd2kgd_calls 
arcturus_kfd2kgd = { .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v9_set_address_watch, + .clear_address_watch = kgd_gfx_v9_clear_address_watch, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index a7a6edda557f..8ad7a7779e14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -880,6 +880,82 @@ uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev, return 0; } +#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H) +uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VMID, + debug_vmid); + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 7); + + /* Turning off this watch point until we set all the registers */ + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 0); + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + /* Enable the watch point */ + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + return 0; +} + +uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + return 0; +} + + /* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values * The values read are: * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. 
@@ -969,6 +1045,8 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v10_set_address_watch, + .clear_address_watch = kgd_gfx_v10_clear_address_watch, .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 3a6aca2b0eaa..e6b70196071a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -39,6 +39,14 @@ uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev, uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev, uint8_t wave_launch_mode, uint32_t vmid); +uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid); +uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id); void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index ed36b433a48b..8c8437a4383f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -678,6 +678,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, - .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode - + .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v10_set_address_watch, + .clear_address_watch = kgd_gfx_v10_clear_address_watch }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 9711d5128d09..52efa690a3c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -737,6 +737,54 @@ static uint32_t kgd_gfx_v11_set_wave_launch_mode(struct amdgpu_device *adev, return data; } +#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H) +static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 7); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + 
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + return watch_address_cntl; +} + +uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + return 0; +} + const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .program_sh_mem_settings = program_sh_mem_settings_v11, .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11, @@ -757,5 +805,7 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .disable_debug_trap = kgd_gfx_v11_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v11_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override, - .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode + .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v11_set_address_watch, + .clear_address_watch = kgd_gfx_v11_clear_address_watch }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 0acc0c18dfe6..51d93fb13ea3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -816,6 +816,81 @@ uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev, return 0; } +#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H) +uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VMID, + debug_vmid); + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 6); + + /* Turning off this watch point until we set all the registers */ + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 0); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + /* Enable the watch point */ + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + return 0; +} + +uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + return 0; +} + /* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values * The values read are: * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. 
@@ -1090,6 +1165,8 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v9_set_address_watch, + .clear_address_watch = kgd_gfx_v9_clear_address_watch, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 18f4970ac8e4..5f54bff0db49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -84,6 +84,14 @@ uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev, uint32_t trap_mask_request, uint32_t *trap_mask_prev, uint32_t kfd_dbg_trap_cntl_prev); +uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid); +uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id); void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1fae97df7a1e..016724c82928 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2880,6 +2880,7 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v struct mm_struct *mm = NULL; struct pid *pid = NULL; struct kfd_process *target = NULL; + struct kfd_process_device *pdd = NULL; int r = 0; if (sched_policy == KFD_SCHED_POLICY_NO_HWS) { @@ -2957,6 +2958,20 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v goto unlock_out; } + if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || + args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) { + int user_gpu_id = kfd_process_get_user_gpu_id(target, + args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ? 
+ args->set_node_address_watch.gpu_id : + args->clear_node_address_watch.gpu_id); + + pdd = kfd_process_device_data_by_id(target, user_gpu_id); + if (user_gpu_id == -EINVAL || !pdd) { + r = -ENODEV; + goto unlock_out; + } + } + switch (args->op) { case KFD_IOC_DBG_TRAP_ENABLE: if (target != p) @@ -3009,7 +3024,16 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v (uint32_t *)args->resume_queues.queue_array_ptr); break; case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: + r = kfd_dbg_trap_set_dev_address_watch(pdd, + args->set_node_address_watch.address, + args->set_node_address_watch.mask, + &args->set_node_address_watch.id, + args->set_node_address_watch.mode); + break; case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: + r = kfd_dbg_trap_clear_dev_address_watch(pdd, + args->clear_node_address_watch.id); + break; case KFD_IOC_DBG_TRAP_SET_FLAGS: case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT: case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index f4d3dfb35cb3..4b36cc8b5fb7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -24,6 +24,8 @@ #include "kfd_device_queue_manager.h" #include +#define MAX_WATCH_ADDRESSES 4 + void debug_event_write_work_handler(struct work_struct *work) { struct kfd_process *process; @@ -289,6 +291,139 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) pdd->watch_points, flags); } +#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1 +static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id) +{ + int i; + + *watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID; + + spin_lock(&pdd->dev->kfd->watch_points_lock); + + for (i = 0; i < MAX_WATCH_ADDRESSES; i++) { + /* device watchpoint in use so skip */ + if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1) + continue; + + pdd->alloc_watch_ids |= 0x1 << i; + pdd->dev->kfd->alloc_watch_ids |= 0x1 << i; + *watch_id = i; + spin_unlock(&pdd->dev->kfd->watch_points_lock); + return 0; + } + + spin_unlock(&pdd->dev->kfd->watch_points_lock); + + return -ENOMEM; +} + +static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id) +{ + spin_lock(&pdd->dev->kfd->watch_points_lock); + + /* process owns device watch point so safe to clear */ + if ((pdd->alloc_watch_ids >> watch_id) & 0x1) { + pdd->alloc_watch_ids &= ~(0x1 << watch_id); + pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id); + } + + spin_unlock(&pdd->dev->kfd->watch_points_lock); +} + +static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id) +{ + bool owns_watch_id = false; + + spin_lock(&pdd->dev->kfd->watch_points_lock); + owns_watch_id = watch_id < MAX_WATCH_ADDRESSES && + ((pdd->alloc_watch_ids >> watch_id) & 0x1); + + spin_unlock(&pdd->dev->kfd->watch_points_lock); + + return owns_watch_id; +} + +int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd, + uint32_t watch_id) +{ + int r; + + if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id)) + return -EINVAL; + + if (!pdd->dev->kfd->shared_resources.enable_mes) { + r = debug_lock_and_unmap(pdd->dev->dqm); + if (r) + return r; + } + + amdgpu_gfx_off_ctrl(pdd->dev->adev, false); + pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch( + pdd->dev->adev, + watch_id); + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + r = debug_map_and_unlock(pdd->dev->dqm); + else + r = kfd_dbg_set_mes_debug_mode(pdd); + + 
kfd_dbg_clear_dev_watch_id(pdd, watch_id); + + return r; +} + +int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t *watch_id, + uint32_t watch_mode) +{ + int r = kfd_dbg_get_dev_watch_id(pdd, watch_id); + + if (r) + return r; + + if (!pdd->dev->kfd->shared_resources.enable_mes) { + r = debug_lock_and_unmap(pdd->dev->dqm); + if (r) { + kfd_dbg_clear_dev_watch_id(pdd, *watch_id); + return r; + } + } + + amdgpu_gfx_off_ctrl(pdd->dev->adev, false); + pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch( + pdd->dev->adev, + watch_address, + watch_address_mask, + *watch_id, + watch_mode, + pdd->dev->vm_info.last_vmid_kfd); + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + r = debug_map_and_unlock(pdd->dev->dqm); + else + r = kfd_dbg_set_mes_debug_mode(pdd); + + /* HWS is broken so no point in HW rollback but release the watchpoint anyways */ + if (r) + kfd_dbg_clear_dev_watch_id(pdd, *watch_id); + + return 0; +} + +static void kfd_dbg_clear_process_address_watch(struct kfd_process *target) +{ + int i, j; + + for (i = 0; i < target->n_pdds; i++) + for (j = 0; j < MAX_WATCH_ADDRESSES; j++) + kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j); +} + + /* kfd_dbg_trap_deactivate: * target: target process * unwind: If this is unwinding a failed kfd_dbg_trap_enable() @@ -303,6 +438,7 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind if (!unwind) { cancel_work_sync(&target->debug_event_workarea); + kfd_dbg_clear_process_address_watch(target); kfd_dbg_trap_set_wave_launch_mode(target, 0); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index cb17869437c5..7f0757c2af2c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -50,7 +50,13 @@ int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target, uint32_t *trap_mask_supported); int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target, uint8_t wave_launch_mode); - +int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd, + uint32_t watch_id); +int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t *watch_id, + uint32_t watch_mode); int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, unsigned int queue_id, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 2c36bb578633..9fc9d32cb579 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -811,6 +811,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; + spin_lock_init(&kfd->watch_points_lock); + kfd->init_complete = true; dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 05da43bf233a..8ec87bc8ba82 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -381,6 +381,10 @@ struct kfd_dev { struct kfd_node *nodes[MAX_KFD_NODES]; unsigned int num_nodes; + + /* Track per device allocated watch points */ + uint32_t alloc_watch_ids; + spinlock_t watch_points_lock; }; enum kfd_mempool { @@ -833,6 +837,7 @@ struct kfd_process_device { uint32_t spi_dbg_override; 
uint32_t spi_dbg_launch_mode; uint32_t watch_points[4]; + uint32_t alloc_watch_ids; /* * If this process has been checkpointed before, then the user @@ -989,7 +994,6 @@ struct kfd_process { struct semaphore runtime_enable_sema; bool is_runtime_retry; struct kfd_runtime_info runtime_info; - }; #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ From 103d5f08ff42b666c61c350be2c3e724c1646918 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 9 May 2022 10:51:56 -0400 Subject: [PATCH 771/861] drm/amdkfd: add debug set flags operation Allow the debugger to set the single-memory-operation and single-ALU-operation debug flags. Some exceptions are imprecise (memory violations, address watch) in the sense that a trap occurs only when the exception interrupt occurs and not at the non-halting faulty instruction. Trap temporaries 0 & 1 save the program counter address, which means that these values will not point to the faulty instruction address but to the point at which the interrupt was raised. Setting the Single Memory Operations flag will inject an automatic wait on every memory operation instruction, forcing imprecise memory exceptions to become precise at the cost of performance. This setting is not permitted on debug devices that support only a global setting of this option. Return the previously set flags to the debugger as well. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 58 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 1 + 3 files changed, 61 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 016724c82928..5ee38614ed9b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -3035,6 +3035,8 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v args->clear_node_address_watch.id); break; case KFD_IOC_DBG_TRAP_SET_FLAGS: + r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags); + break; case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT: case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 4b36cc8b5fb7..43c3170998d3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -23,6 +23,7 @@ #include "kfd_debug.h" #include "kfd_device_queue_manager.h" #include +#include #define MAX_WATCH_ADDRESSES 4 @@ -423,6 +424,59 @@ static void kfd_dbg_clear_process_address_watch(struct kfd_process *target) kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j); } +int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags) +{ + uint32_t prev_flags = target->dbg_flags; + int i, r = 0, rewind_count = 0; + + for (i = 0; i < target->n_pdds; i++) { + if (!kfd_dbg_is_per_vmid_supported(target->pdds[i]->dev) && + (*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) { + *flags = prev_flags; + return -EACCES; + } + } + + target->dbg_flags = *flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP; + *flags = prev_flags; + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) + continue; + + if (!pdd->dev->kfd->shared_resources.enable_mes) + r = debug_refresh_runlist(pdd->dev->dqm); + else + r = kfd_dbg_set_mes_debug_mode(pdd); + + if (r) { + target->dbg_flags = prev_flags; + break; + } + + rewind_count++; + } + + /*
Rewind flags */ + if (r) { + target->dbg_flags = prev_flags; + + for (i = 0; i < rewind_count; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) + continue; + + if (!pdd->dev->kfd->shared_resources.enable_mes) + debug_refresh_runlist(pdd->dev->dqm); + else + kfd_dbg_set_mes_debug_mode(pdd); + } + } + + return r; +} /* kfd_dbg_trap_deactivate: * target: target process @@ -437,9 +491,13 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind int i; if (!unwind) { + uint32_t flags = 0; + cancel_work_sync(&target->debug_event_workarea); kfd_dbg_clear_process_address_watch(target); kfd_dbg_trap_set_wave_launch_mode(target, 0); + + kfd_dbg_trap_set_flags(target, &flags); } for (i = 0; i < target->n_pdds; i++) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 7f0757c2af2c..ef8e9f7f1716 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -57,6 +57,7 @@ int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, uint32_t watch_address_mask, uint32_t *watch_id, uint32_t watch_mode); +int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags); int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, unsigned int queue_id, From 5bc20c224bcb863571e8831cdbba23cd61b10ac3 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 9 May 2022 11:10:32 -0400 Subject: [PATCH 772/861] drm/amdkfd: add debug query event operation Allow the debugger to query a single queue, device and process exception. The KFD should also return the GPU or Queue id of the exception. The debugger also has the option of clearing exceptions after being queried. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6 +++ drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 64 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 5 ++ 3 files changed, 75 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 5ee38614ed9b..498859259b55 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -3038,6 +3038,12 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags); break; case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT: + r = kfd_dbg_ev_query_debug_event(target, + &args->query_debug_event.queue_id, + &args->query_debug_event.gpu_id, + args->query_debug_event.exception_mask, + &args->query_debug_event.exception_mask); + break; case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 43c3170998d3..e9530e682e85 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -27,6 +27,70 @@ #define MAX_WATCH_ADDRESSES 4 +int kfd_dbg_ev_query_debug_event(struct kfd_process *process, + unsigned int *queue_id, + unsigned int *gpu_id, + uint64_t exception_clear_mask, + uint64_t *event_status) +{ + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + int i; + + if (!(process && process->debug_trap_enabled)) + return -ENODATA; + + mutex_lock(&process->event_mutex); + *event_status = 0; + *queue_id = 0; + *gpu_id = 0; + + /* find and 
report queue events */ + pqm = &process->pqm; + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + uint64_t tmp = process->exception_enable_mask; + + if (!pqn->q) + continue; + + tmp &= pqn->q->properties.exception_status; + + if (!tmp) + continue; + + *event_status = pqn->q->properties.exception_status; + *queue_id = pqn->q->properties.queue_id; + *gpu_id = pqn->q->device->id; + pqn->q->properties.exception_status &= ~exception_clear_mask; + goto out; + } + + /* find and report device events */ + for (i = 0; i < process->n_pdds; i++) { + struct kfd_process_device *pdd = process->pdds[i]; + uint64_t tmp = process->exception_enable_mask + & pdd->exception_status; + + if (!tmp) + continue; + + *event_status = pdd->exception_status; + *gpu_id = pdd->dev->id; + pdd->exception_status &= ~exception_clear_mask; + goto out; + } + + /* report process events */ + if (process->exception_enable_mask & process->exception_status) { + *event_status = process->exception_status; + process->exception_status &= ~exception_clear_mask; + } + +out: + mutex_unlock(&process->event_mutex); + return *event_status ? 0 : -EAGAIN; +} + void debug_event_write_work_handler(struct work_struct *work) { struct kfd_process *process; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index ef8e9f7f1716..e78f954c0684 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -27,6 +27,11 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count); int kfd_dbg_trap_activate(struct kfd_process *target); +int kfd_dbg_ev_query_debug_event(struct kfd_process *process, + unsigned int *queue_id, + unsigned int *gpu_id, + uint64_t exception_clear_mask, + uint64_t *event_status); bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev, unsigned int pasid, uint32_t doorbell_id, From 2b36de971d25daa2ad287114ae3ca11a8f8d49d7 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 9 May 2022 13:37:36 -0400 Subject: [PATCH 773/861] drm/amdkfd: add debug query exception info operation Allow the debugger to query additional info based on an exception code. For device exceptions, it's currently only memory violation information. For process exceptions, it's currently only runtime information. Queue exceptions only report the queue exception status. The debugger has the option of clearing the target exception on query.
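As an illustration (not part of the patch): from the debugger side, the query is a single ioctl round trip. A hedged user-space sketch, assuming the AMDKFD_IOC_DBG_TRAP request and the args layout from the uapi header this series extends, plus a /dev/kfd fd with debugging already enabled on the target; error handling is elided:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Query (and clear) the target's EC_PROCESS_RUNTIME exception info. */
static int query_runtime_info(int kfd_fd, uint64_t target_pid)
{
	struct kfd_runtime_info info = {0};
	struct kfd_ioctl_dbg_trap_args args = {0};
	uint32_t valid;

	args.pid = target_pid;
	args.op = KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO;
	args.query_exception_info.source_id = 0;	/* unused for process-level codes */
	args.query_exception_info.exception_code = EC_PROCESS_RUNTIME;
	args.query_exception_info.clear_exception = 1;
	args.query_exception_info.info_ptr = (uint64_t)(uintptr_t)&info;
	args.query_exception_info.info_size = sizeof(info);

	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args) < 0)
		return -1;

	/* On return, info_size holds the KFD's own size for this info type;
	 * only the common prefix of the buffer is valid on a size mismatch.
	 */
	valid = args.query_exception_info.info_size;
	if (valid > sizeof(info))
		valid = sizeof(info);
	printf("runtime info: %u valid bytes (KFD reports %u)\n",
	       valid, args.query_exception_info.info_size);
	return 0;
}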
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++ drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 120 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 6 ++ 3 files changed, 133 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 498859259b55..b7ee79b5220a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -3045,6 +3045,13 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v &args->query_debug_event.exception_mask); break; case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: + r = kfd_dbg_trap_query_exception_info(target, + args->query_exception_info.source_id, + args->query_exception_info.exception_code, + args->query_exception_info.clear_exception, + (void __user *)args->query_exception_info.info_ptr, + &args->query_exception_info.info_size); + break; case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: pr_warn("Debug op %i not supported yet\n", args->op); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index e9530e682e85..24e2b285448a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -890,6 +890,126 @@ int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target, return r; } +int kfd_dbg_trap_query_exception_info(struct kfd_process *target, + uint32_t source_id, + uint32_t exception_code, + bool clear_exception, + void __user *info, + uint32_t *info_size) +{ + bool found = false; + int r = 0; + uint32_t copy_size, actual_info_size = 0; + uint64_t *exception_status_ptr = NULL; + + if (!target) + return -EINVAL; + + if (!info || !info_size) + return -EINVAL; + + mutex_lock(&target->event_mutex); + + if (KFD_DBG_EC_TYPE_IS_QUEUE(exception_code)) { + /* Per queue exceptions */ + struct queue *queue = NULL; + int i; + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + struct qcm_process_device *qpd = &pdd->qpd; + + list_for_each_entry(queue, &qpd->queues_list, list) { + if (!found && queue->properties.queue_id == source_id) { + found = true; + break; + } + } + if (found) + break; + } + + if (!found) { + r = -EINVAL; + goto out; + } + + if (!(queue->properties.exception_status & KFD_EC_MASK(exception_code))) { + r = -ENODATA; + goto out; + } + exception_status_ptr = &queue->properties.exception_status; + } else if (KFD_DBG_EC_TYPE_IS_DEVICE(exception_code)) { + /* Per device exceptions */ + struct kfd_process_device *pdd = NULL; + int i; + + for (i = 0; i < target->n_pdds; i++) { + pdd = target->pdds[i]; + if (pdd->dev->id == source_id) { + found = true; + break; + } + } + + if (!found) { + r = -EINVAL; + goto out; + } + + if (!(pdd->exception_status & KFD_EC_MASK(exception_code))) { + r = -ENODATA; + goto out; + } + + if (exception_code == EC_DEVICE_MEMORY_VIOLATION) { + copy_size = min((size_t)(*info_size), pdd->vm_fault_exc_data_size); + + if (copy_to_user(info, pdd->vm_fault_exc_data, copy_size)) { + r = -EFAULT; + goto out; + } + actual_info_size = pdd->vm_fault_exc_data_size; + if (clear_exception) { + kfree(pdd->vm_fault_exc_data); + pdd->vm_fault_exc_data = NULL; + pdd->vm_fault_exc_data_size = 0; + } + } + exception_status_ptr = &pdd->exception_status; + } else if (KFD_DBG_EC_TYPE_IS_PROCESS(exception_code)) { + /* Per process exceptions */ + if (!(target->exception_status & 
KFD_EC_MASK(exception_code))) { + r = -ENODATA; + goto out; + } + + if (exception_code == EC_PROCESS_RUNTIME) { + copy_size = min((size_t)(*info_size), sizeof(target->runtime_info)); + + if (copy_to_user(info, (void *)&target->runtime_info, copy_size)) { + r = -EFAULT; + goto out; + } + + actual_info_size = sizeof(target->runtime_info); + } + + exception_status_ptr = &target->exception_status; + } else { + pr_debug("Bad exception type [%i]\n", exception_code); + r = -EINVAL; + goto out; + } + + *info_size = actual_info_size; + if (clear_exception) + *exception_status_ptr &= ~KFD_EC_MASK(exception_code); +out: + mutex_unlock(&target->event_mutex); + return r; +} + void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target, uint64_t exception_set_mask) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index e78f954c0684..234e2ccafa87 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -63,6 +63,12 @@ int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, uint32_t *watch_id, uint32_t watch_mode); int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags); +int kfd_dbg_trap_query_exception_info(struct kfd_process *target, + uint32_t source_id, + uint32_t exception_code, + bool clear_exception, + void __user *info, + uint32_t *info_size); int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, unsigned int queue_id, From b17bd5dbf64677682a3bca249c64521d5eabcb38 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 10 May 2022 11:15:29 -0400 Subject: [PATCH 774/861] drm/amdkfd: add debug queue snapshot operation Allow the debugger to get a snapshot of a specified number of queues containing various queue property information that is copied to the debugger. Since the debugger doesn't know how many queues exist at any given time, allow the debugger to pass the requested number of snapshots as 0 to get the actual number of potential snapshots to use for a subsequent snapshot request for actual information. To prevent future ABI breakage, pass in the requested entry_size. The KFD will return its own entry_size in case the debugger still wants to log the information in a core dump on sizing failure. Also allow the debugger to clear exceptions when doing a snapshot.
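As an illustration (not part of the patch): the intended calling convention is a two-pass handshake, probe for the count first, then allocate and fetch. A hedged user-space sketch under the same uapi assumptions as above; a real debugger would retry the sequence, since queues can be created between the two calls:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static struct kfd_queue_snapshot_entry *snapshot_queues(int kfd_fd,
							 uint64_t target_pid,
							 uint32_t *count)
{
	struct kfd_ioctl_dbg_trap_args args = {0};
	struct kfd_queue_snapshot_entry *buf;

	args.pid = target_pid;
	args.op = KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT;
	args.queue_snapshot.entry_size = sizeof(*buf);

	/* Pass 1: num_queues == 0 only asks how many entries exist. */
	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args) < 0)
		return NULL;

	*count = args.queue_snapshot.num_queues;
	buf = calloc(*count, sizeof(*buf));
	if (!buf || !*count)
		return buf;

	/* Pass 2: fetch up to *count entries into the buffer. */
	args.queue_snapshot.snapshot_buf_ptr = (uint64_t)(uintptr_t)buf;
	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args) < 0) {
		free(buf);
		return NULL;
	}
	return buf;
}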
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6 +++ .../drm/amd/amdkfd/kfd_device_queue_manager.c | 36 +++++++++++++++++ .../drm/amd/amdkfd/kfd_device_queue_manager.h | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +++ .../amd/amdkfd/kfd_process_queue_manager.c | 40 +++++++++++++++++++ 5 files changed, 90 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index b7ee79b5220a..24066756e478 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -3053,6 +3053,12 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v &args->query_exception_info.info_size); break; case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: + r = pqm_get_queue_snapshot(&target->pqm, + args->queue_snapshot.exception_mask, + (void __user *)args->queue_snapshot.snapshot_buf_ptr, + &args->queue_snapshot.num_queues, + &args->queue_snapshot.entry_size); + break; case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: pr_warn("Debug op %i not supported yet\n", args->op); r = -EACCES; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index bc9e81293165..0c1be91a87c6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -3041,6 +3041,42 @@ int suspend_queues(struct kfd_process *p, return total_suspended; } +static uint32_t set_queue_type_for_user(struct queue_properties *q_props) +{ + switch (q_props->type) { + case KFD_QUEUE_TYPE_COMPUTE: + return q_props->format == KFD_QUEUE_FORMAT_PM4 + ? KFD_IOC_QUEUE_TYPE_COMPUTE + : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL; + case KFD_QUEUE_TYPE_SDMA: + return KFD_IOC_QUEUE_TYPE_SDMA; + case KFD_QUEUE_TYPE_SDMA_XGMI: + return KFD_IOC_QUEUE_TYPE_SDMA_XGMI; + default: + WARN_ONCE(true, "queue type not recognized!"); + return 0xffffffff; + }; +} + +void set_queue_snapshot_entry(struct queue *q, + uint64_t exception_clear_mask, + struct kfd_queue_snapshot_entry *qss_entry) +{ + qss_entry->ring_base_address = q->properties.queue_address; + qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr; + qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr; + qss_entry->ctx_save_restore_address = + q->properties.ctx_save_restore_area_address; + qss_entry->ctx_save_restore_area_size = + q->properties.ctx_save_restore_area_size; + qss_entry->exception_status = q->properties.exception_status; + qss_entry->queue_id = q->properties.queue_id; + qss_entry->gpu_id = q->device->id; + qss_entry->ring_size = (uint32_t)q->properties.queue_size; + qss_entry->queue_type = set_queue_type_for_user(&q->properties); + q->properties.exception_status &= ~exception_clear_mask; +} + int debug_lock_and_unmap(struct device_queue_manager *dqm) { int r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index d4e6dbffe8c2..7dd4b177219d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -300,6 +300,9 @@ int suspend_queues(struct kfd_process *p, int resume_queues(struct kfd_process *p, uint32_t num_queues, uint32_t *usr_queue_id_array); +void set_queue_snapshot_entry(struct queue *q, + uint64_t exception_clear_mask, + struct kfd_queue_snapshot_entry *qss_entry); int debug_lock_and_unmap(struct device_queue_manager *dqm); 
int debug_map_and_unlock(struct device_queue_manager *dqm); int debug_refresh_runlist(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 8ec87bc8ba82..023b17e0116b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1355,6 +1355,11 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size); +int pqm_get_queue_snapshot(struct process_queue_manager *pqm, + uint64_t exception_clear_mask, + void __user *buf, + int *num_qss_entries, + uint32_t *entry_size); int amdkfd_fence_wait_timeout(uint64_t *fence_addr, uint64_t fence_value, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 01ccab607a69..9ad1a2186a24 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -585,6 +585,46 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, save_area_used_size); } +int pqm_get_queue_snapshot(struct process_queue_manager *pqm, + uint64_t exception_clear_mask, + void __user *buf, + int *num_qss_entries, + uint32_t *entry_size) +{ + struct process_queue_node *pqn; + struct kfd_queue_snapshot_entry src; + uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries; + int r = 0; + + *num_qss_entries = 0; + if (!(*entry_size)) + return -EINVAL; + + *entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry)); + mutex_lock(&pqm->process->event_mutex); + + memset(&src, 0, sizeof(src)); + + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + if (!pqn->q) + continue; + + if (*num_qss_entries < tmp_qss_entries) { + set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src); + + if (copy_to_user(buf, &src, *entry_size)) { + r = -EFAULT; + break; + } + buf += tmp_entry_size; + } + *num_qss_entries += 1; + } + + mutex_unlock(&pqm->process->event_mutex); + return r; +} + static int get_queue_data_sizes(struct kfd_process_device *pdd, struct queue *q, uint32_t *mqd_size, From 12976e6a5ab8fc3766c0304d72f7eec81a109b55 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 10 May 2022 12:47:45 -0400 Subject: [PATCH 775/861] drm/amdkfd: add debug device snapshot operation Similar to queue snapshot, return an array of device information using an entry_size check and return. Unlike queue snapshots, the debugger needs to pass the correct number of devices that exist. If it fails to do so, the KFD will return the number of actual devices so that the debugger can make a subsequent successful call.
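Worth spelling out (editor's illustration, not part of the patch): the entry_size handshake is what keeps both snapshot calls extensible. Each element copy covers only min(debugger entry size, KFD entry size) bytes, while the destination pointer advances by the caller's declared stride, so either side may be built against an older ABI and still see a consistent common prefix. A condensed sketch of that loop, with hypothetical names:

#include <stdint.h>
#include <string.h>

/* Copy 'count' source records of 'kern_size' bytes each into a destination
 * buffer whose caller-declared stride is 'user_size'. Only the common
 * prefix of each record is copied; trailing bytes the caller reserved for
 * fields the producer does not know about are left untouched.
 */
static void copy_snapshot_entries(void *user_buf, uint32_t user_size,
				  const void *kern_entries, uint32_t kern_size,
				  uint32_t count)
{
	uint32_t copy_size = user_size < kern_size ? user_size : kern_size;
	uint32_t i;

	for (i = 0; i < count; i++)
		memcpy((char *)user_buf + (size_t)i * user_size,
		       (const char *)kern_entries + (size_t)i * kern_size,
		       copy_size);
}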
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++- drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 73 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 5 ++ 3 files changed, 83 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 24066756e478..f54ff5c3387d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -3060,8 +3060,11 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v &args->queue_snapshot.entry_size); break; case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: - pr_warn("Debug op %i not supported yet\n", args->op); - r = -EACCES; + r = kfd_dbg_trap_device_snapshot(target, + args->device_snapshot.exception_mask, + (void __user *)args->device_snapshot.snapshot_buf_ptr, + &args->device_snapshot.num_devices, + &args->device_snapshot.entry_size); break; default: pr_err("Invalid option: %i\n", args->op); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 24e2b285448a..125274445f43 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -22,6 +22,7 @@ #include "kfd_debug.h" #include "kfd_device_queue_manager.h" +#include "kfd_topology.h" #include #include @@ -1010,6 +1011,78 @@ out: return r; } +int kfd_dbg_trap_device_snapshot(struct kfd_process *target, + uint64_t exception_clear_mask, + void __user *user_info, + uint32_t *number_of_device_infos, + uint32_t *entry_size) +{ + struct kfd_dbg_device_info_entry device_info; + uint32_t tmp_entry_size = *entry_size, tmp_num_devices; + int i, r = 0; + + if (!(target && user_info && number_of_device_infos && entry_size)) + return -EINVAL; + + tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds); + *number_of_device_infos = target->n_pdds; + *entry_size = min_t(size_t, *entry_size, sizeof(device_info)); + + if (!tmp_num_devices) + return 0; + + memset(&device_info, 0, sizeof(device_info)); + + mutex_lock(&target->event_mutex); + + /* Run over all pdd of the process */ + for (i = 0; i < tmp_num_devices; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id); + + device_info.gpu_id = pdd->dev->id; + device_info.exception_status = pdd->exception_status; + device_info.lds_base = pdd->lds_base; + device_info.lds_limit = pdd->lds_limit; + device_info.scratch_base = pdd->scratch_base; + device_info.scratch_limit = pdd->scratch_limit; + device_info.gpuvm_base = pdd->gpuvm_base; + device_info.gpuvm_limit = pdd->gpuvm_limit; + device_info.location_id = topo_dev->node_props.location_id; + device_info.vendor_id = topo_dev->node_props.vendor_id; + device_info.device_id = topo_dev->node_props.device_id; + device_info.revision_id = pdd->dev->adev->pdev->revision; + device_info.subsystem_vendor_id = pdd->dev->adev->pdev->subsystem_vendor; + device_info.subsystem_device_id = pdd->dev->adev->pdev->subsystem_device; + device_info.fw_version = pdd->dev->kfd->mec_fw_version; + device_info.gfx_target_version = + topo_dev->node_props.gfx_target_version; + device_info.simd_count = topo_dev->node_props.simd_count; + device_info.max_waves_per_simd = + topo_dev->node_props.max_waves_per_simd; + device_info.array_count = topo_dev->node_props.array_count; + device_info.simd_arrays_per_engine = + 
topo_dev->node_props.simd_arrays_per_engine; + device_info.num_xcc = NUM_XCC(pdd->dev->xcc_mask); + device_info.capability = topo_dev->node_props.capability; + device_info.debug_prop = topo_dev->node_props.debug_prop; + + if (exception_clear_mask) + pdd->exception_status &= ~exception_clear_mask; + + if (copy_to_user(user_info, &device_info, *entry_size)) { + r = -EFAULT; + break; + } + + user_info += tmp_entry_size; + } + + mutex_unlock(&target->event_mutex); + + return r; +} + void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target, uint64_t exception_set_mask) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 234e2ccafa87..a289e59ceb79 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -81,6 +81,11 @@ static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) } void debug_event_write_work_handler(struct work_struct *work); +int kfd_dbg_trap_device_snapshot(struct kfd_process *target, + uint64_t exception_clear_mask, + void __user *user_info, + uint32_t *number_of_device_infos, + uint32_t *entry_size); void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target, uint64_t exception_set_mask); From a159afdad2f6b97e4d18549cff2b53d17e68a412 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 10 May 2022 12:51:26 -0400 Subject: [PATCH 776/861] drm/amdkfd: bump kfd ioctl minor version for debug api availability Bump the minor version to declare debugging capability is now available. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 1 - include/uapi/linux/kfd_ioctl.h | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index f54ff5c3387d..cce2abe12e1b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2984,7 +2984,6 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v if (!r) target->exception_enable_mask = args->enable.exception_mask; - pr_warn("Debug functions limited\n"); break; case KFD_IOC_DBG_TRAP_DISABLE: r = kfd_dbg_trap_disable(target); diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 32913d674d38..1781e7669982 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -38,9 +38,10 @@ * - 1.10 - Add SMI profiler event log * - 1.11 - Add unified memory for ctx save/restore area * - 1.12 - Add DMA buf export ioctl + * - 1.13 - Add debugger API */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 12 +#define KFD_IOCTL_MINOR_VERSION 13 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ From 8f4f5f0b901a444c2317ef0fb29f35bc296daa55 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 11 Apr 2023 11:49:09 +0800 Subject: [PATCH 777/861] drm/amd/pm: fulfill SMU13 OD settings init and restore Gfxclk fmin/fmax, Uclk fmin/fmax and Gfx v/f curve voltage offset OD settings are supported for SMU13. 
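As a usage note (editor's illustration, not part of the patch): the new offsets are exercised through the existing pp_od_clk_voltage sysfs file; only the "vc" syntax differs on SMU13. A hedged sketch in C; the card0 path is an assumption, root privileges are required, and overdrive must be enabled via the amdgpu.ppfeaturemask module parameter:

#include <stdio.h>

/* Write one overdrive command string to pp_od_clk_voltage. */
static int od_write(const char *cmd)
{
	FILE *f = fopen("/sys/class/drm/card0/device/pp_od_clk_voltage", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", cmd);
	return fclose(f);	/* flush; nonzero indicates a write error */
}

int main(void)
{
	od_write("vc 0 10");	/* +10 mV on v/f curve anchor point 1 */
	od_write("vc 5 -10");	/* -10 mV on anchor point 6 */
	od_write("c");		/* commit the staged settings */
	return 0;
}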
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 26 +++- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 13 +- .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 129 +++++++++++++++++- .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 128 ++++++++++++++++- 4 files changed, 286 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index da0da03569e8..a57952b93e73 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -678,7 +678,12 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * clock labeled OD_MCLK * * - three points labeled OD_VDDC_CURVE. - * They can be used to calibrate the sclk voltage curve. + * They can be used to calibrate the sclk voltage curve. This is + * available for Vega20 and NV1X. + * + * - voltage offset for the six anchor points of the v/f curve labeled + * OD_VDDC_CURVE. They can be used to calibrate the v/f curve. This + * is only available for some SMU13 ASICs. * * - voltage offset(in mV) applied on target voltage calculation. * This is available for Sienna Cichlid, Navy Flounder and Dimgrey @@ -719,12 +724,19 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * E.g., "p 2 0 800" would set the minimum core clock on core * 2 to 800Mhz. * - * For sclk voltage curve, enter the new values by writing a - * string that contains "vc point clock voltage" to the file. The - * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will - * update point1 with clock set as 300Mhz and voltage as - * 600mV. "vc 2 1000 1000" will update point3 with clock set - * as 1000Mhz and voltage 1000mV. + * For sclk voltage curve, + * - For NV1X, enter the new values by writing a string that + * contains "vc point clock voltage" to the file. The points + * are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will update + * point1 with clock set as 300Mhz and voltage as 600mV. "vc 2 + * 1000 1000" will update point3 with clock set as 1000Mhz and + * voltage 1000mV. + * - For SMU13 ASICs, enter the new values by writing a string that + * contains "vc anchor_point_index voltage_offset" to the file. + * There are six anchor points in total defined on the v/f curve with + * indices 0 - 5. + * - "vc 0 10" will update the voltage offset for point1 as 10mV. + * - "vc 5 -10" will update the voltage offset for point6 as -10mV. * * To update the voltage offset applied for gfxclk/voltage calculation, * enter the new value by writing a string that contains "vo offset".
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 70db36d45974..e52c563f0dac 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -467,17 +467,26 @@ int smu_v13_0_init_smc_tables(struct smu_context *smu) ret = -ENOMEM; goto err3_out; } + + smu_table->user_overdrive_table = + kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); + if (!smu_table->user_overdrive_table) { + ret = -ENOMEM; + goto err4_out; + } } smu_table->combo_pptable = kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL); if (!smu_table->combo_pptable) { ret = -ENOMEM; - goto err4_out; + goto err5_out; } return 0; +err5_out: + kfree(smu_table->user_overdrive_table); err4_out: kfree(smu_table->boot_overdrive_table); err3_out: @@ -497,12 +506,14 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->gpu_metrics_table); kfree(smu_table->combo_pptable); + kfree(smu_table->user_overdrive_table); kfree(smu_table->boot_overdrive_table); kfree(smu_table->overdrive_table); kfree(smu_table->max_sustainable_clocks); kfree(smu_table->driver_pptable); smu_table->gpu_metrics_table = NULL; smu_table->combo_pptable = NULL; + smu_table->user_overdrive_table = NULL; smu_table->boot_overdrive_table = NULL; smu_table->overdrive_table = NULL; smu_table->max_sustainable_clocks = NULL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index f4783e685bf8..faa772db106c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -237,6 +237,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, TAB_MAP(I2C_COMMANDS), TAB_MAP(ECCINFO), + TAB_MAP(OVERDRIVE), }; static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -331,6 +332,11 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) struct smu_13_0_0_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsMin; if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; @@ -342,6 +348,10 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) smu_baco->maco_support = true; + if (!overdrive_lowerlimits->FeatureCtrlMask || + !overdrive_upperlimits->FeatureCtrlMask) + smu->od_enabled = false; + table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -461,7 +471,7 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), + SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); @@ -1384,6 +1394,121 @@ static ssize_t 
smu_v13_0_0_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); } +static int smu_v13_0_0_get_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret = 0; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + false); + if (ret) + dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); + + return ret; +} + +static int smu_v13_0_0_upload_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret = 0; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + true); + if (ret) + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + + return ret; +} + +static void smu_v13_0_0_dump_od_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + struct amdgpu_device *adev = smu->adev; + + dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); +} + +static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) +{ + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; + OverDriveTableExternal_t *boot_od_table = + (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table; + OverDriveTableExternal_t *user_od_table = + (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table; + OverDriveTableExternal_t user_od_table_bak; + int ret = 0; + int i; + + ret = smu_v13_0_0_get_overdrive_table(smu, boot_od_table); + if (ret) + return ret; + + smu_v13_0_0_dump_od_table(smu, boot_od_table); + + memcpy(od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + + /* + * For S3/S4/Runpm resume, we need to setup those overdrive tables again, + * but we have to preserve user defined values in "user_od_table". 
+ */ + if (!smu->adev->in_suspend) { + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + smu->user_dpm_profile.user_od = false; + } else if (smu->user_dpm_profile.user_od) { + memcpy(&user_od_table_bak, + user_od_table, + sizeof(OverDriveTableExternal_t)); + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + user_od_table->OverDriveTable.GfxclkFmin = + user_od_table_bak.OverDriveTable.GfxclkFmin; + user_od_table->OverDriveTable.GfxclkFmax = + user_od_table_bak.OverDriveTable.GfxclkFmax; + user_od_table->OverDriveTable.UclkFmin = + user_od_table_bak.OverDriveTable.UclkFmin; + user_od_table->OverDriveTable.UclkFmax = + user_od_table_bak.OverDriveTable.UclkFmax; + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = + user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i]; + } + + return 0; +} + +static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = table_context->overdrive_table; + OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table; + int res; + + user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT | + 1U << PP_OD_FEATURE_UCLK_BIT | + 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT; + res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table); + user_od_table->OverDriveTable.FeatureCtrlMask = 0; + if (res == 0) + memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t)); + + return res; +} + static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = @@ -2150,6 +2275,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, .get_gpu_metrics = smu_v13_0_0_get_gpu_metrics, .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range, + .set_default_od_settings = smu_v13_0_0_set_default_od_settings, + .restore_user_od_settings = smu_v13_0_0_restore_user_od_settings, .init_pptable_microcode = smu_v13_0_init_pptable_microcode, .populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk, .set_performance_level = smu_v13_0_set_performance_level, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index cf6827179fd1..20a8be3bb49a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -206,6 +206,7 @@ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { TAB_MAP(DRIVER_SMU_CONFIG), TAB_MAP(ACTIVITY_MONITOR_COEFF), [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, + TAB_MAP(OVERDRIVE), }; static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -322,6 +323,10 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu) struct smu_baco_context *smu_baco = &smu->smu_baco; PPTable_t *smc_pptable = table_context->driver_pptable; BoardTable_t *BoardTable = &smc_pptable->BoardTable; + const OverDriveLimits_t * const overdrive_upperlimits = + &smc_pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &smc_pptable->SkuTable.OverDriveLimitsMin; if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; @@ -333,6 +338,10 @@ static int smu_v13_0_7_check_powerplay_table(struct 
smu_context *smu) if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled)) smu_baco->maco_support = true; + if (!overdrive_lowerlimits->FeatureCtrlMask || + !overdrive_upperlimits->FeatureCtrlMask) + smu->od_enabled = false; + table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -479,7 +488,7 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), + SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); @@ -1371,6 +1380,121 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); } +static int smu_v13_0_7_get_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret = 0; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + false); + if (ret) + dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); + + return ret; +} + +static int smu_v13_0_7_upload_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret = 0; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + true); + if (ret) + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + + return ret; +} + +static void smu_v13_0_7_dump_od_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + struct amdgpu_device *adev = smu->adev; + + dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); +} + +static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) +{ + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; + OverDriveTableExternal_t *boot_od_table = + (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table; + OverDriveTableExternal_t *user_od_table = + (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table; + OverDriveTableExternal_t user_od_table_bak; + int ret = 0; + int i; + + ret = smu_v13_0_7_get_overdrive_table(smu, boot_od_table); + if (ret) + return ret; + + smu_v13_0_7_dump_od_table(smu, boot_od_table); + + memcpy(od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + + /* + * For S3/S4/Runpm resume, we need to setup those overdrive tables again, + * but we have to preserve user defined values in "user_od_table". 
+ */ + if (!smu->adev->in_suspend) { + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + smu->user_dpm_profile.user_od = false; + } else if (smu->user_dpm_profile.user_od) { + memcpy(&user_od_table_bak, + user_od_table, + sizeof(OverDriveTableExternal_t)); + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + user_od_table->OverDriveTable.GfxclkFmin = + user_od_table_bak.OverDriveTable.GfxclkFmin; + user_od_table->OverDriveTable.GfxclkFmax = + user_od_table_bak.OverDriveTable.GfxclkFmax; + user_od_table->OverDriveTable.UclkFmin = + user_od_table_bak.OverDriveTable.UclkFmin; + user_od_table->OverDriveTable.UclkFmax = + user_od_table_bak.OverDriveTable.UclkFmax; + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = + user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i]; + } + + return 0; +} + +static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = table_context->overdrive_table; + OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table; + int res; + + user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT | + 1U << PP_OD_FEATURE_UCLK_BIT | + 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT; + res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table); + user_od_table->OverDriveTable.FeatureCtrlMask = 0; + if (res == 0) + memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t)); + + return res; +} + static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = @@ -1760,6 +1884,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, .get_gpu_metrics = smu_v13_0_7_get_gpu_metrics, .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range, + .set_default_od_settings = smu_v13_0_7_set_default_od_settings, + .restore_user_od_settings = smu_v13_0_7_restore_user_od_settings, .set_performance_level = smu_v13_0_set_performance_level, .gfx_off_control = smu_v13_0_gfx_off_control, .get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm, From 2e8452ea4ef6406927e4c5a71d1a7ed6881c5a9b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 8 May 2023 16:57:02 +0800 Subject: [PATCH 778/861] drm/amd/pm: fulfill the OD support for SMU13.0.0 Fulfill the interfaces for OD settings retrieving and setting. 
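The new od_edit_dpm_table() below stages edits in a scratch copy of the overdrive table and records each touched feature in FeatureCtrlMask, so the PMFW applies only the masked features on commit. A minimal sketch of that stage/commit protocol, with invented names and values:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for PP_OD_FEATURE_*_BIT. */
enum { OD_GFXCLK_BIT, OD_UCLK_BIT, OD_VF_CURVE_BIT };

struct od_table {
	uint32_t feature_ctrl_mask;	/* features touched by this commit */
	int gfxclk_fmax;
	int uclk_fmax;
};

/* Stand-in for the table upload to the PMFW. */
static void upload(const struct od_table *t)
{
	printf("upload: mask=0x%x gfxclk_fmax=%d uclk_fmax=%d\n",
	       (unsigned int)t->feature_ctrl_mask,
	       t->gfxclk_fmax, t->uclk_fmax);
}

int main(void)
{
	struct od_table od = { 0, 2500, 1000 };

	/* Each edit stages a value and flags the feature it touched. */
	od.gfxclk_fmax = 2600;
	od.feature_ctrl_mask |= 1U << OD_GFXCLK_BIT;

	/* On commit the firmware reads the mask to learn what changed. */
	upload(&od);

	/* The driver then clears the mask: it is a per-operation
	 * directive, not cached user state. */
	od.feature_ctrl_mask = 0;
	return 0;
}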
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 445 ++++++++++++++++-- 1 file changed, 402 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index faa772db106c..5ac5ea770c1c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1032,17 +1032,119 @@ static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu, value); } +static bool smu_v13_0_0_is_od_feature_supported(struct smu_context *smu, + int od_feature_bit) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + + return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit); +} + +static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu, + int od_feature_bit, + bool lower_boundary, + int32_t *min, + int32_t *max) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsMin; + int32_t od_min_setting, od_max_setting; + + switch (od_feature_bit) { + case PP_OD_FEATURE_GFXCLK_BIT: + if (lower_boundary) { + od_min_setting = overdrive_lowerlimits->GfxclkFmin; + od_max_setting = overdrive_upperlimits->GfxclkFmin; + } else { + od_min_setting = overdrive_lowerlimits->GfxclkFmax; + od_max_setting = overdrive_upperlimits->GfxclkFmax; + } + break; + case PP_OD_FEATURE_UCLK_BIT: + if (lower_boundary) { + od_min_setting = overdrive_lowerlimits->UclkFmin; + od_max_setting = overdrive_upperlimits->UclkFmin; + } else { + od_min_setting = overdrive_lowerlimits->UclkFmax; + od_max_setting = overdrive_upperlimits->UclkFmax; + } + break; + case PP_OD_FEATURE_GFX_VF_CURVE_BIT: + od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary; + od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary; + break; + default: + break; + } + + if (min) + *min = od_min_setting; + if (max) + *max = od_max_setting; +} + +static void smu_v13_0_0_dump_od_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + struct amdgpu_device *adev = smu->adev; + + dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); +} + +static int smu_v13_0_0_get_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret = 0; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + false); + if (ret) + dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); + + return ret; +} + +static int smu_v13_0_0_upload_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret = 0; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + true); + if (ret) + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + + return ret; +} + static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + 
OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; uint32_t gen_speed, lane_width; int i, curr_freq, size = 0; + int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1159,6 +1261,89 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, "*" : ""); break; + case SMU_OD_SCLK: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_GFXCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_SCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + break; + + case SMU_OD_MCLK: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_UCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_MCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n", + od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); + break; + + case SMU_OD_VDDC_CURVE: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n"); + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + size += sysfs_emit_at(buf, size, "%d: %dmv\n", + i, + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]); + break; + + case SMU_OD_RANGE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && + !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && + !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + + if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_BIT, + true, + &min_value, + NULL); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_BIT, + false, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_BIT, + true, + &min_value, + NULL); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_BIT, + false, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT, + true, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n", + min_value, max_value); + } + break; + default: break; } @@ -1166,6 +1351,222 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, return size; } +static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long input[], + uint32_t size) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + struct amdgpu_device *adev = smu->adev; + uint32_t offset_of_featurectrlmask; + int32_t minimum, maximum; + uint32_t feature_ctrlmask; + int i, ret = 0; + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + dev_warn(adev->dev, "GFXCLK_LIMITS setting not 
supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_BIT, + true, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + case 1: + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_BIT, + false, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { + dev_err(adev->dev, + "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.GfxclkFmin, + (uint32_t)od_table->OverDriveTable.GfxclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_BIT, + true, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + case 1: + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_BIT, + false, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) { + dev_err(adev->dev, + "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.UclkFmin, + (uint32_t)od_table->OverDriveTable.UclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_VDDC_CURVE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + dev_warn(adev->dev, "VF curve setting not supported!\n"); + return -ENOTSUPP; + } + + if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS || + input[0] < 0) + 
return -EINVAL; + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT, + true, + &minimum, + &maximum); + if (input[1] < minimum || + input[1] > maximum) { + dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", + input[1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT; + break; + + case PP_OD_RESTORE_DEFAULT_TABLE: + feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask; + memcpy(od_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t)); + od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask; + fallthrough; + + case PP_OD_COMMIT_DPM_TABLE: + /* + * The member below tells the PMFW which settings are targeted + * by this single operation. + * `uint32_t FeatureCtrlMask;` + * It does not contain actual information about the user's custom + * settings. Thus we do not cache it. + */ + offset_of_featurectrlmask = offsetof(OverDriveTable_t, FeatureCtrlMask); + if (memcmp(od_table + offset_of_featurectrlmask, + table_context->user_overdrive_table + offset_of_featurectrlmask, + sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask)) { + smu_v13_0_0_dump_od_table(smu, od_table); + + ret = smu_v13_0_0_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + + od_table->OverDriveTable.FeatureCtrlMask = 0; + memcpy(table_context->user_overdrive_table + offset_of_featurectrlmask, + od_table + offset_of_featurectrlmask, + sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask); + + if (!memcmp(table_context->user_overdrive_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t))) + smu->user_dpm_profile.user_od = false; + else + smu->user_dpm_profile.user_od = true; + } + break; + + default: + return -ENOSYS; + } + + return ret; +} + static int smu_v13_0_0_force_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask) @@ -1394,49 +1795,6 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); } -static int smu_v13_0_0_get_overdrive_table(struct smu_context *smu, - OverDriveTableExternal_t *od_table) -{ - int ret = 0; - - ret = smu_cmn_update_table(smu, - SMU_TABLE_OVERDRIVE, - 0, - (void *)od_table, - false); - if (ret) - dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); - - return ret; -} - -static int smu_v13_0_0_upload_overdrive_table(struct smu_context *smu, - OverDriveTableExternal_t *od_table) -{ - int ret = 0; - - ret = smu_cmn_update_table(smu, - SMU_TABLE_OVERDRIVE, - 0, - (void *)od_table, - true); - if (ret) - dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); - - return ret; -} - -static void smu_v13_0_0_dump_od_table(struct smu_context *smu, - OverDriveTableExternal_t *od_table) -{ - struct amdgpu_device *adev = smu->adev; - - dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, - od_table->OverDriveTable.GfxclkFmax); - dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, - od_table->OverDriveTable.UclkFmax); -} - static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) { OverDriveTableExternal_t *od_table = @@ -2277,6 +2635,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range, .set_default_od_settings =
smu_v13_0_0_set_default_od_settings, .restore_user_od_settings = smu_v13_0_0_restore_user_od_settings, + .od_edit_dpm_table = smu_v13_0_0_od_edit_dpm_table, .init_pptable_microcode = smu_v13_0_init_pptable_microcode, .populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk, .set_performance_level = smu_v13_0_set_performance_level, From 1718e973e3d23b653cd77994073a9deda3875689 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 1 Jun 2023 17:53:45 +0530 Subject: [PATCH 779/861] drm/amd/pm: Fill metrics data for SMUv13.0.6 Populate metrics data table for SMU v13.0.6. Add PCIe link speed/width information also. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 97 ++++++++++++------- 1 file changed, 61 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 75255e0baf91..2611241a5ff1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -80,7 +80,10 @@ /* possible frequency drift (1Mhz) */ #define EPSILON 1 -#define smnPCIE_ESM_CTRL 0x111003D0 +#define smnPCIE_ESM_CTRL 0x193D0 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1ab40288 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), @@ -197,6 +200,7 @@ struct PPTable_t { }; #define SMUQ10_TO_UINT(x) ((x) >> 10) +#define SMUQ16_TO_UINT(x) ((x) >> 16) struct smu_v13_0_6_dpm_map { enum smu_clk_type clk_type; @@ -1935,6 +1939,15 @@ static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu) smu_v13_0_6_throttler_map)); } +static int +smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + return REG_GET_FIELD(RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL), + PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); +} + static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -1953,8 +1966,12 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table struct smu_table_context *smu_table = &smu->smu_table; struct gpu_metrics_v1_3 *gpu_metrics = (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; + struct amdgpu_device *adev = smu->adev; + int ret = 0, inst0, xcc0; MetricsTable_t *metrics; - int i, ret = 0; + + inst0 = adev->sdma.instance[0].aid_id; + xcc0 = GET_INST(GC, 0); metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); ret = smu_v13_0_6_get_metrics_table(smu, metrics, true); @@ -1963,51 +1980,59 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); - /* TODO: Decide on how to fill in zero value fields */ - gpu_metrics->temperature_edge = 0; - gpu_metrics->temperature_hotspot = 0; - gpu_metrics->temperature_mem = 0; - gpu_metrics->temperature_vrgfx = 0; - gpu_metrics->temperature_vrsoc = 0; - gpu_metrics->temperature_vrmem = 0; + gpu_metrics->temperature_hotspot = + SMUQ10_TO_UINT(metrics->MaxSocketTemperature); + /* Individual HBM stack temperature is not reported */ + gpu_metrics->temperature_mem = + SMUQ10_TO_UINT(metrics->MaxHbmTemperature); + /* Reports max temperature of all voltage rails */ + gpu_metrics->temperature_vrsoc = + 
SMUQ10_TO_UINT(metrics->MaxVrTemperature); - gpu_metrics->average_gfx_activity = 0; - gpu_metrics->average_umc_activity = 0; - gpu_metrics->average_mm_activity = 0; + gpu_metrics->average_gfx_activity = + SMUQ10_TO_UINT(metrics->SocketGfxBusy); + gpu_metrics->average_umc_activity = + SMUQ10_TO_UINT(metrics->DramBandwidthUtilization); - gpu_metrics->average_socket_power = 0; - gpu_metrics->energy_accumulator = 0; + gpu_metrics->average_socket_power = + SMUQ10_TO_UINT(metrics->SocketPower); + gpu_metrics->energy_accumulator = + SMUQ16_TO_UINT(metrics->SocketEnergyAcc); - gpu_metrics->average_gfxclk_frequency = 0; - gpu_metrics->average_socclk_frequency = 0; - gpu_metrics->average_uclk_frequency = 0; - gpu_metrics->average_vclk0_frequency = 0; - gpu_metrics->average_dclk0_frequency = 0; + gpu_metrics->current_gfxclk = + SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]); + gpu_metrics->current_socclk = + SMUQ10_TO_UINT(metrics->SocclkFrequency[inst0]); + gpu_metrics->current_uclk = SMUQ10_TO_UINT(metrics->UclkFrequency); + gpu_metrics->current_vclk0 = + SMUQ10_TO_UINT(metrics->VclkFrequency[inst0]); + gpu_metrics->current_dclk0 = + SMUQ10_TO_UINT(metrics->DclkFrequency[inst0]); - gpu_metrics->current_gfxclk = 0; - gpu_metrics->current_socclk = 0; - gpu_metrics->current_uclk = 0; - gpu_metrics->current_vclk0 = 0; - gpu_metrics->current_dclk0 = 0; + gpu_metrics->average_gfxclk_frequency = gpu_metrics->current_gfxclk; + gpu_metrics->average_socclk_frequency = gpu_metrics->current_socclk; + gpu_metrics->average_uclk_frequency = gpu_metrics->current_uclk; + gpu_metrics->average_vclk0_frequency = gpu_metrics->current_vclk0; + gpu_metrics->average_dclk0_frequency = gpu_metrics->current_dclk0; + /* Throttle status is not reported through metrics now */ gpu_metrics->throttle_status = 0; - gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status( - gpu_metrics->throttle_status, smu_v13_0_6_throttler_map); - gpu_metrics->current_fan_speed = 0; - - gpu_metrics->pcie_link_width = 0; - gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); + if (!(adev->flags & AMD_IS_APU)) { + gpu_metrics->pcie_link_width = + smu_v13_0_6_get_current_pcie_link_width_level(smu); + gpu_metrics->pcie_link_speed = + smu_v13_0_6_get_current_pcie_link_speed(smu); + } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); - gpu_metrics->gfx_activity_acc = 0; - gpu_metrics->mem_activity_acc = 0; + gpu_metrics->gfx_activity_acc = + SMUQ10_TO_UINT(metrics->SocketGfxBusyAcc); + gpu_metrics->mem_activity_acc = + SMUQ10_TO_UINT(metrics->DramBandwidthUtilizationAcc); - for (i = 0; i < NUM_HBM_INSTANCES; i++) - gpu_metrics->temperature_hbm[i] = 0; - - gpu_metrics->firmware_timestamp = 0; + gpu_metrics->firmware_timestamp = metrics->Timestamp; *table = (void *)gpu_metrics; kfree(metrics); From 2a9aa52e4617c777fb0c885f0c02bf5ac65a786c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 9 May 2023 10:21:52 +0800 Subject: [PATCH 780/861] drm/amd/pm: fulfill the OD support for SMU13.0.7 Fulfill the interfaces for OD settings retrieving and setting. 
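Like the SMU13.0.0 variant, the commit path below detects real changes by comparing the staged table against the cached user table while excluding FeatureCtrlMask, which the code comments describe as a per-operation directive rather than cached state. A simplified, self-contained sketch of that comparison, written with explicit byte pointers so the offsetof()-derived offset is applied in bytes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct od_table {
	uint32_t feature_ctrl_mask;	/* transient commit directive */
	int gfxclk_fmin, gfxclk_fmax;
	int uclk_fmin, uclk_fmax;
};

/* Return nonzero when anything after feature_ctrl_mask differs. */
static int od_settings_changed(const struct od_table *staged,
			       const struct od_table *cached)
{
	size_t off = offsetof(struct od_table, feature_ctrl_mask) +
		     sizeof(staged->feature_ctrl_mask);

	/* Byte pointers keep the pointer arithmetic in bytes, which
	 * is what an offsetof()-derived offset requires. */
	return memcmp((const uint8_t *)staged + off,
		      (const uint8_t *)cached + off,
		      sizeof(*staged) - off);
}

int main(void)
{
	struct od_table staged = { 1u << 0, 500, 2600, 97, 1000 };
	struct od_table cached = { 0, 500, 2500, 97, 1000 };

	/* Masks differ but are ignored; the Fmax-like field differs. */
	printf("changed: %d\n", od_settings_changed(&staged, &cached) != 0);
	return 0;
}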
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 445 ++++++++++++++++-- 1 file changed, 402 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 20a8be3bb49a..0bd086360efa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1022,16 +1022,118 @@ static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu, value); } +static bool smu_v13_0_7_is_od_feature_supported(struct smu_context *smu, + int od_feature_bit) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + + return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit); +} + +static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu, + int od_feature_bit, + bool lower_boundary, + int32_t *min, + int32_t *max) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsMin; + int32_t od_min_setting, od_max_setting; + + switch (od_feature_bit) { + case PP_OD_FEATURE_GFXCLK_BIT: + if (lower_boundary) { + od_min_setting = overdrive_lowerlimits->GfxclkFmin; + od_max_setting = overdrive_upperlimits->GfxclkFmin; + } else { + od_min_setting = overdrive_lowerlimits->GfxclkFmax; + od_max_setting = overdrive_upperlimits->GfxclkFmax; + } + break; + case PP_OD_FEATURE_UCLK_BIT: + if (lower_boundary) { + od_min_setting = overdrive_lowerlimits->UclkFmin; + od_max_setting = overdrive_upperlimits->UclkFmin; + } else { + od_min_setting = overdrive_lowerlimits->UclkFmax; + od_max_setting = overdrive_upperlimits->UclkFmax; + } + break; + case PP_OD_FEATURE_GFX_VF_CURVE_BIT: + od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary; + od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary; + break; + default: + break; + } + + if (min) + *min = od_min_setting; + if (max) + *max = od_max_setting; +} + +static void smu_v13_0_7_dump_od_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + struct amdgpu_device *adev = smu->adev; + + dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); +} + +static int smu_v13_0_7_get_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret = 0; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + false); + if (ret) + dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); + + return ret; +} + +static int smu_v13_0_7_upload_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret = 0; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + true); + if (ret) + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + + return ret; +} + static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + 
OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; int i, curr_freq, size = 0; + int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1148,6 +1250,89 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, "*" : ""); break; + case SMU_OD_SCLK: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_GFXCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_SCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + break; + + case SMU_OD_MCLK: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_UCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_MCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n", + od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); + break; + + case SMU_OD_VDDC_CURVE: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n"); + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + size += sysfs_emit_at(buf, size, "%d: %dmv\n", + i, + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]); + break; + + case SMU_OD_RANGE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && + !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && + !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + + if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_BIT, + true, + &min_value, + NULL); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_BIT, + false, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_BIT, + true, + &min_value, + NULL); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_BIT, + false, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT, + true, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n", + min_value, max_value); + } + break; + default: break; } @@ -1155,6 +1340,222 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, return size; } +static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long input[], + uint32_t size) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + struct amdgpu_device *adev = smu->adev; + uint32_t offset_of_featurectrlmask; + int32_t minimum, maximum; + uint32_t feature_ctrlmask; + int i, ret = 0; + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; 
i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_BIT, + true, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + case 1: + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_BIT, + false, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { + dev_err(adev->dev, + "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.GfxclkFmin, + (uint32_t)od_table->OverDriveTable.GfxclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_BIT, + true, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + case 1: + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_BIT, + false, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) { + dev_err(adev->dev, + "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.UclkFmin, + (uint32_t)od_table->OverDriveTable.UclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_VDDC_CURVE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + dev_warn(adev->dev, "VF curve setting not supported!\n"); + return -ENOTSUPP; + } + + if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS || + input[0] < 0) + return -EINVAL; + + 
smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT, + true, + &minimum, + &maximum); + if (input[1] < minimum || + input[1] > maximum) { + dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", + input[1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT; + break; + + case PP_OD_RESTORE_DEFAULT_TABLE: + feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask; + memcpy(od_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t)); + od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask; + fallthrough; + + case PP_OD_COMMIT_DPM_TABLE: + /* + * The member below tells the PMFW which settings are targeted + * by this single operation. + * `uint32_t FeatureCtrlMask;` + * It does not contain actual information about the user's custom + * settings. Thus we do not cache it. + */ + offset_of_featurectrlmask = offsetof(OverDriveTable_t, FeatureCtrlMask); + if (memcmp(od_table + offset_of_featurectrlmask, + table_context->user_overdrive_table + offset_of_featurectrlmask, + sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask)) { + smu_v13_0_7_dump_od_table(smu, od_table); + + ret = smu_v13_0_7_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + + od_table->OverDriveTable.FeatureCtrlMask = 0; + memcpy(table_context->user_overdrive_table + offset_of_featurectrlmask, + od_table + offset_of_featurectrlmask, + sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask); + + if (!memcmp(table_context->user_overdrive_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t))) + smu->user_dpm_profile.user_od = false; + else + smu->user_dpm_profile.user_od = true; + } + break; + + default: + return -ENOSYS; + } + + return ret; +} + static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask) @@ -1380,49 +1781,6 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); } -static int smu_v13_0_7_get_overdrive_table(struct smu_context *smu, - OverDriveTableExternal_t *od_table) -{ - int ret = 0; - - ret = smu_cmn_update_table(smu, - SMU_TABLE_OVERDRIVE, - 0, - (void *)od_table, - false); - if (ret) - dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); - - return ret; -} - -static int smu_v13_0_7_upload_overdrive_table(struct smu_context *smu, - OverDriveTableExternal_t *od_table) -{ - int ret = 0; - - ret = smu_cmn_update_table(smu, - SMU_TABLE_OVERDRIVE, - 0, - (void *)od_table, - true); - if (ret) - dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); - - return ret; -} - -static void smu_v13_0_7_dump_od_table(struct smu_context *smu, - OverDriveTableExternal_t *od_table) -{ - struct amdgpu_device *adev = smu->adev; - - dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, - od_table->OverDriveTable.GfxclkFmax); - dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, - od_table->OverDriveTable.UclkFmax); -} - static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) { OverDriveTableExternal_t *od_table = @@ -1886,6 +2244,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range, .set_default_od_settings =
smu_v13_0_7_set_default_od_settings, .restore_user_od_settings = smu_v13_0_7_restore_user_od_settings, + .od_edit_dpm_table = smu_v13_0_7_od_edit_dpm_table, .set_performance_level = smu_v13_0_set_performance_level, .gfx_off_control = smu_v13_0_gfx_off_control, .get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm, From a4d4db727320e0f80df605ccb877743359448a36 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 2 Jun 2023 15:19:22 +0530 Subject: [PATCH 781/861] drm/amd/display: Fix dc/dcn20/dcn20_optc.c kdoc Fix all kdoc warnings in dc/dcn20/dcn20_optc.c: display/dc/dcn20/dcn20_optc.c:41: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * Enable CRTC display/dc/dcn20/dcn20_optc.c:76: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst *For the below, I'm not sure how your GSL parameters are stored in your env, display/dc/dcn20/dcn20_optc.c:85: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * There are (MAX_OPTC+1)/2 gsl groups available for use. Cc: Rodrigo Siqueira Cc: Aurabindo Pillai Cc: Harry Wentland Signed-off-by: Srinivasan Shanmugam Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index e0edc163d767..d6f095b4555d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -38,8 +38,12 @@ optc1->tg_shift->field_name, optc1->tg_mask->field_name /** - * Enable CRTC - * Enable CRTC - call ASIC Control Object to enable Timing generator. + * optc2_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. + * + * @optc: timing_generator instance. + * + * Return: If CRTC is enabled, return true. + * */ bool optc2_enable_crtc(struct timing_generator *optc) { @@ -73,15 +77,18 @@ bool optc2_enable_crtc(struct timing_generator *optc) } /** - *For the below, I'm not sure how your GSL parameters are stored in your env, - * so I will assume a gsl_params struct for now + * optc2_set_gsl() - Assign OTG to GSL groups, + * set one of the OTGs to be master & rest are slaves + * + * @optc: timing_generator instance. + * @params: pointer to gsl_params */ void optc2_set_gsl(struct timing_generator *optc, const struct gsl_params *params) { struct optc *optc1 = DCN10TG_FROM_TG(optc); -/** +/* * There are (MAX_OPTC+1)/2 gsl groups available for use. * In each group (assign an OTG to a group by setting OTG_GSLX_EN = 1, * set one of the OTGs to be the master (OTG_GSL_MASTER_EN = 1) and the rest are slaves. From 2b607025797543433e7733c276ec34381edffd71 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 2 Jun 2023 14:17:53 +0800 Subject: [PATCH 782/861] drm/amd/display: clean up some inconsistent indenting No functional modification involved. drivers/gpu/drm/amd/amdgpu/../display/dc/link/link_dpms.c:2377 link_set_dpms_on() warn: inconsistent indenting. 
Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5376 Acked-by: Alex Deucher Signed-off-by: Jiapeng Chong Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 2963267fe74a..f7f1a1586f3b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2374,8 +2374,8 @@ void link_set_dpms_on( } } - if (dc_is_virtual_signal(pipe_ctx->stream->signal)) - return; + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); From 24e461e84f1c6d58fa1032f06d97e277dd0b4adf Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Tue, 9 May 2023 16:40:19 -0400 Subject: [PATCH 783/861] drm/amd/display: add ODM case when looking for first split pipe [Why] When going from ODM 2:1 single display case to max displays, second odm pipe needs to be repurposed for one of the new single displays. However, acquire_first_split_pipe() only handles MPC case and not ODM case [How] Add ODM conditions in acquire_first_split_pipe() Add commit_minimal_transition_state() in commit_streams() to handle odm 2:1 exit first, and then process new streams Handle ODM condition in commit_minimal_transition_state() Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Acked-by: Stylon Wang Signed-off-by: Samson Tam Reviewed-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 36 ++++++++++++++++++- .../gpu/drm/amd/display/dc/core/dc_resource.c | 20 +++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index af929ee11af5..0e9403f21dae 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2008,6 +2008,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c return result; } +static bool commit_minimal_transition_state(struct dc *dc, + struct dc_state *transition_base_context); + /** * dc_commit_streams - Commit current stream state * @@ -2029,6 +2032,8 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_state *context; enum dc_status res = DC_OK; struct dc_validation_set set[MAX_STREAMS] = {0}; + struct pipe_ctx *pipe; + bool handle_exit_odm2to1 = false; if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) return res; @@ -2053,6 +2058,22 @@ enum dc_status dc_commit_streams(struct dc *dc, } } + /* Check for case where we are going from odm 2:1 to max + * pipe scenario. 
For these cases, we will call + * commit_minimal_transition_state() to exit out of odm 2:1 + * first before processing new streams + */ + if (stream_count == dc->res_pool->pipe_count) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->next_odm_pipe) + handle_exit_odm2to1 = true; + } + } + + if (handle_exit_odm2to1) + res = commit_minimal_transition_state(dc, dc->current_state); + context = dc_create_state(dc); if (!context) goto context_alloc_fail; @@ -3912,6 +3933,7 @@ static bool commit_minimal_transition_state(struct dc *dc, unsigned int i, j; unsigned int pipe_in_use = 0; bool subvp_in_use = false; + bool odm_in_use = false; if (!transition_context) return false; @@ -3940,6 +3962,18 @@ static bool commit_minimal_transition_state(struct dc *dc, } } + /* If ODM is enabled and we are adding or removing planes from any ODM + * pipe, we must use the minimal transition. + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->next_odm_pipe) { + odm_in_use = true; + break; + } + } + /* When the OS add a new surface if we have been used all of pipes with odm combine * and mpc split feature, it need use commit_minimal_transition_state to transition safely. * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need @@ -3948,7 +3982,7 @@ static bool commit_minimal_transition_state(struct dc *dc, * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially * enter/exit MPO when DCN still have enough resources. */ - if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) { + if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) { dc_release_state(transition_context); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 7e1e5532f88f..c72540d37aef 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1445,6 +1445,26 @@ static int acquire_first_split_pipe( split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; split_pipe->pipe_idx = i; + split_pipe->stream = stream; + return i; + } else if (split_pipe->prev_odm_pipe && + split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) { + split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe; + if (split_pipe->next_odm_pipe) + split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe; + + if (split_pipe->prev_odm_pipe->plane_state) + resource_build_scaling_params(split_pipe->prev_odm_pipe); + + memset(split_pipe, 0, sizeof(*split_pipe)); + split_pipe->stream_res.tg = pool->timing_generators[i]; + split_pipe->plane_res.hubp = pool->hubps[i]; + split_pipe->plane_res.ipp = pool->ipps[i]; + split_pipe->plane_res.dpp = pool->dpps[i]; + split_pipe->stream_res.opp = pool->opps[i]; + split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; + split_pipe->pipe_idx = i; + split_pipe->stream = stream; return i; } From 75c2b7ed080d7421157c03064be82275364136e7 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 18 Apr 2023 10:11:56 -0400 Subject: [PATCH 784/861] drm/amd/display: fix seamless odm transitions Add missing programming and function pointers Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Acked-by: Stylon Wang Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Tested-by: Daniel Wheeler Signed-off-by: Alex 
Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 11 +++++++++++ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h | 1 + 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index eaf9e9ccad2a..20f668d28364 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1741,6 +1741,17 @@ static void dcn20_program_pipe( if (hws->funcs.setup_vupdate_interrupt) hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); + + if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) { + unsigned int k1_div, k2_div; + + hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); + + dc->res_pool->dccg->funcs->set_pixel_rate_div( + dc->res_pool->dccg, + pipe_ctx->stream_res.tg->inst, + k1_div, k2_div); + } } if (pipe_ctx->update_flags.bits.odm) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c index 6ef56fb32131..2cffedea2df5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c @@ -98,7 +98,7 @@ static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, i optc1->opp_count = opp_cnt; } -static void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) +void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) { struct optc *optc1 = DCN10TG_FROM_TG(optc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h index b92ba8c75694..abf0121a1006 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h @@ -179,5 +179,6 @@ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) void dcn32_timing_generator_init(struct optc *optc1); +void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode); #endif /* __DC_OPTC_DCN32_H__ */ From 9bd443cb74bd47d820c3cc31ee0ed3008d004d73 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 2 Jun 2023 13:52:04 -0400 Subject: [PATCH 785/861] drm/amdgpu: fix debug wait on idle for gfx9.4.1 The wait-for-idle call takes an amd_ip_block_type, not an amd_hw_ip_block_type, so pass AMD_IP_BLOCK_TYPE_GFX rather than GC_HWIP.
Reported-by: Hamza Mahfooz Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 6df215aba4c4..625db444df1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -309,7 +309,7 @@ out: if (!suspend || r) return r; - return amdgpu_device_ip_wait_for_idle(adev, GC_HWIP); + return amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX); } static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_waitcnt) From 9f0bcf49e9895cb005d78b33a5eebfa11711b425 Mon Sep 17 00:00:00 2001 From: Chia-I Wu Date: Thu, 1 Jun 2023 15:44:12 -0700 Subject: [PATCH 786/861] amdgpu: validate offset_in_bo of drm_amdgpu_gem_va MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is motivated by OOB access in amdgpu_vm_update_range when offset_in_bo+map_size overflows. v2: keep the validations in amdgpu_vm_bo_map v3: add the validations to amdgpu_vm_bo_map/amdgpu_vm_bo_replace_map rather than to amdgpu_gem_va_ioctl Fixes: 9f7eb5367d00 ("drm/amdgpu: actually use the VM map parameters") Reviewed-by: Christian König Signed-off-by: Chia-I Wu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 22f9a65ca0fc..76d57bc7ac62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1434,14 +1434,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t eaddr; /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || - size == 0 || size & ~PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) + return -EINVAL; + if (saddr + size <= saddr || offset + size <= offset) return -EINVAL; /* make sure object fit at this offset */ eaddr = saddr + size - 1; - if (saddr >= eaddr || - (bo && offset + size > amdgpu_bo_size(bo)) || + if ((bo && offset + size > amdgpu_bo_size(bo)) || (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; @@ -1500,14 +1500,14 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, int r; /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || - size == 0 || size & ~PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) + return -EINVAL; + if (saddr + size <= saddr || offset + size <= offset) return -EINVAL; /* make sure object fit at this offset */ eaddr = saddr + size - 1; - if (saddr >= eaddr || - (bo && offset + size > amdgpu_bo_size(bo)) || + if ((bo && offset + size > amdgpu_bo_size(bo)) || (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; From 5be7d4e3cf9ef9853934daa03cf573723bae1650 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Sun, 28 May 2023 14:26:37 -0600 Subject: [PATCH 787/861] drm/amdgpu/discovery: Replace fake flex-arrays with flexible-array members MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Zero-length and one-element arrays are deprecated, and we are moving towards adopting C99 flexible-array members, instead. 
Use the DECLARE_FLEX_ARRAY() helper macro to transform zero-length arrays in a union into flexible-array members. And replace a one-element array with a C99 flexible-array member. Address the following warnings found with GCC-13 and -fstrict-flex-arrays=3 enabled: drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c:1009:89: warning: array subscript kk is outside array bounds of ‘uint32_t[0]’ {aka ‘unsigned int[]’} [-Warray-bounds=] drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c:1007:94: warning: array subscript kk is outside array bounds of ‘uint64_t[0]’ {aka ‘long long unsigned int[]’} [-Warray-bounds=] drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c:1310:94: warning: array subscript k is outside array bounds of ‘uint64_t[0]’ {aka ‘long long unsigned int[]’} [-Warray-bounds=] drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c:1309:57: warning: array subscript k is outside array bounds of ‘uint32_t[0]’ {aka ‘unsigned int[]’} [-Warray-bounds=] This helps with the ongoing efforts to tighten the FORTIFY_SOURCE routines on memcpy() and help us make progress towards globally enabling -fstrict-flex-arrays=3 [1]. This results in no differences in binary output. Link: https://github.com/KSPP/linux/issues/21 Link: https://github.com/KSPP/linux/issues/193 Link: https://github.com/KSPP/linux/issues/300 Link: https://gcc.gnu.org/pipermail/gcc-patches/2022-October/602902.html [1] Reviewed-by: Kees Cook Signed-off-by: Gustavo A. R. Silva Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/discovery.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h index 9181e57887db..f43e29722ef7 100644 --- a/drivers/gpu/drm/amd/include/discovery.h +++ b/drivers/gpu/drm/amd/include/discovery.h @@ -122,7 +122,7 @@ typedef struct ip_v3 uint8_t sub_revision : 4; /* HCID Sub-Revision */ uint8_t variant : 4; /* HW variant */ #endif - uint32_t base_address[1]; /* Base Address list. Corresponds to the num_base_address field*/ + uint32_t base_address[]; /* Base Address list. Corresponds to the num_base_address field*/ } ip_v3; typedef struct ip_v4 { @@ -140,8 +140,8 @@ typedef struct ip_v4 { uint8_t sub_revision : 4; /* HCID Sub-Revision */ #endif union { - uint32_t base_address[0]; /* 32-bit Base Address list. Corresponds to the num_base_address field*/ - uint64_t base_address_64[0]; /* 64-bit Base Address list. Corresponds to the num_base_address field*/ + DECLARE_FLEX_ARRAY(uint32_t, base_address); /* 32-bit Base Address list. Corresponds to the num_base_address field*/ + DECLARE_FLEX_ARRAY(uint64_t, base_address_64); /* 64-bit Base Address list. Corresponds to the num_base_address field*/ } __packed; } ip_v4; From 2890662822def3dcc1b2e690d6fcea694c666083 Mon Sep 17 00:00:00 2001 From: "GONG, Ruiqi" Date: Fri, 2 Jun 2023 18:12:33 +0800 Subject: [PATCH 788/861] drm/amd/display: fix compilation error due to shifting negative value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently compiling linux-next with allmodconfig triggers the following error: ./drivers/gpu/drm/amd/amdgpu/../display/include/fixed31_32.h: In function ‘dc_fixpt_truncate’: ./drivers/gpu/drm/amd/amdgpu/../display/include/fixed31_32.h:528:22: error: left shift of negative value [-Werror=shift-negative-value] 528 | arg.value &= (~0LL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits); | ^~ Use `unsigned long long` instead. 
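To see why the one-character change below is enough: left-shifting a negative value is undefined behavior in C, which is what -Werror=shift-negative-value rejects, while the same shift on an unsigned all-ones value is well defined and produces the intended keep-the-integer-bits mask. A minimal standalone sketch, not DC code (the local macro stands in for FIXED31_32_BITS_PER_FRACTIONAL_PART):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_FRACTIONAL_PART 32

/* (~0LL) << n shifts a negative value (undefined behavior, hence the
 * error above); (~0ULL) << n is a plain unsigned shift with the same
 * bit pattern.
 */
static uint64_t truncate_mask(unsigned int frac_bits)
{
	return (~0ULL) << (BITS_PER_FRACTIONAL_PART - frac_bits);
}

int main(void)
{
	/* keep 8 fractional bits: the low 24 fraction bits are cleared */
	printf("mask = 0x%016llx\n", (unsigned long long)truncate_mask(8));
	return 0;
}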
Signed-off-by: GONG, Ruiqi Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index ece97ae0e826..d4cf7ead1d87 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -525,7 +525,7 @@ static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigne if (negative) arg.value = -arg.value; - arg.value &= (~0LL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits); + arg.value &= (~0ULL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits); if (negative) arg.value = -arg.value; return arg; From 6b37fee590ec842f6e172c4f9c7dc4baadbdfda2 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 2 Jun 2023 14:02:45 +0530 Subject: [PATCH 789/861] drm/amd/display: Address kdoc warnings in dcn30_fpu.c Fixes the following gcc warnings with W=1: display/dc/dml/dcn30/dcn30_fpu.c:677: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst * Finds dummy_latency_index when MCLK switching using firmware based display/dc/dml/dcn30/dcn30_fpu.c:688: warning: Function parameter or member 'dc' not described in 'dcn30_find_dummy_latency_index_for_fw_based_mclk_switch' display/dc/dml/dcn30/dcn30_fpu.c:688: warning: Function parameter or member 'context' not described in 'dcn30_find_dummy_latency_index_for_fw_based_mclk_switch' display/dc/dml/dcn30/dcn30_fpu.c:688: warning: Function parameter or member 'pipes' not described in 'dcn30_find_dummy_latency_index_for_fw_based_mclk_switch' display/dc/dml/dcn30/dcn30_fpu.c:688: warning: Function parameter or member 'pipe_cnt' not described in 'dcn30_find_dummy_latency_index_for_fw_based_mclk_switch' display/dc/dml/dcn30/dcn30_fpu.c:688: warning: Function parameter or member 'vlevel' not described in 'dcn30_find_dummy_latency_index_for_fw_based_mclk_switch' Cc: Rodrigo Siqueira Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index a352c703e258..ccb4ad78f667 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -674,10 +674,19 @@ void dcn30_fpu_update_bw_bounding_box(struct dc *dc, } /** - * Finds dummy_latency_index when MCLK switching using firmware based - * vblank stretch is enabled. This function will iterate through the - * table of dummy pstate latencies until the lowest value that allows + * dcn30_find_dummy_latency_index_for_fw_based_mclk_switch() - Finds + * dummy_latency_index when MCLK switching using firmware based vblank stretch + * is enabled.
This function will iterate through the table of dummy pstate + * latencies until the lowest value that allows * dm_allow_self_refresh_and_mclk_switch to happen is found + * + * @dc: Current DC state + * @context: new dc state + * @pipes: DML pipe params + * @pipe_cnt: number of DML pipes + * @vlevel: Voltage level calculated by DML + * + * Return: lowest dummy_latency_index value */ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, struct dc_state *context, From d6634d4d92eac068e2136afab49dfb15a9efae74 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 2 Jun 2023 21:08:49 +0530 Subject: [PATCH 790/861] drm/amd/display: Add gnu_printf format attribute for snprintf_count() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following W=1 kernel build warning: display/dc/dcn10/dcn10_hw_sequencer_debug.c: In function ‘snprintf_count’: display/dc/dcn10/dcn10_hw_sequencer_debug.c:56:2: warning: function ‘snprintf_count’ might be a candidate for ‘gnu_printf’ format attribute [-Wsuggest-attribute=format] Use the __printf() attribute to let the compiler warn if invalid format strings are passed in. And fix the following checks: CHECK: Avoid CamelCase: +unsigned int __printf(3, 4) snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) CHECK: Avoid CamelCase: +unsigned int __printf(3, 4) snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) Cc: Hamza Mahfooz Cc: Rodrigo Siqueira Cc: Harry Wentland Cc: Aurabindo Pillai Signed-off-by: Srinivasan Shanmugam Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index a0f8e31d2adc..46a2ebcabd1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -45,7 +45,8 @@ #include "dcn10_cm_common.h" #include "clk_mgr.h" -unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) +__printf(3, 4) +unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...) { int ret_vsnprintf; unsigned int chars_printed; @@ -53,15 +54,15 @@ unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) va_list args; va_start(args, fmt); - ret_vsnprintf = vsnprintf(pBuf, bufSize, fmt, args); + ret_vsnprintf = vsnprintf(pbuf, bufsize, fmt, args); va_end(args); if (ret_vsnprintf > 0) { - if (ret_vsnprintf < bufSize) + if (ret_vsnprintf < bufsize) chars_printed = ret_vsnprintf; else - chars_printed = bufSize - 1; + chars_printed = bufsize - 1; } else chars_printed = 0; From 3cb4807dbf7f47f0a1368f78e7c37b1dd515c2dd Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Tue, 4 Apr 2023 12:53:54 -0400 Subject: [PATCH 791/861] Revert "drm/amd/display: Only use ODM2:1 policy for high pixel rate displays" This reverts commit 047783cdd5f604d87398236beb4971abb4d43293 since it causes higher power consumption for single display use case (4k60). Also, this patch introduced a 35% performance drop in a Vulkan benchmark. * The patch disabled the ODM-combination on most popular monitors, including 4K, 2K and FHD monitors at 60Hz. 
* ODM-combination can halve the DPP clock to save power; that is why it was introduced, and the PM log shows that a single pipe consumes more power at 4K@60Hz. * ODM-combination involves two de-tiled buffers, which provide a longer self-sustained time and so benefit the memory power optimization. Signed-off-by: Aurabindo Pillai Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 1 - drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 2e6b39fe2613..8d68f02f5147 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1918,7 +1918,6 @@ int dcn32_populate_dml_pipes_from_context( context->stream_status[0].plane_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) && is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) && - pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ && dc->debug.enable_single_display_2to1_odm_policy && !vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 58826e0aa76e..80bebdf43eca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -37,7 +37,6 @@ #define DCN3_2_MBLK_WIDTH 128 #define DCN3_2_MBLK_HEIGHT_4BPE 128 #define DCN3_2_MBLK_HEIGHT_8BPE 64 -#define DCN3_2_VMIN_DISPCLK_HZ 717000000 #define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq #define SUBVP_HIGH_REFRESH_LIST_LEN 3 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 From 1598fc576420207e5c89088fc46610c2318e2f5c Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Thu, 1 Jun 2023 13:09:44 -0400 Subject: [PATCH 792/861] drm/amd/display: Program OTG vtotal min/max selectors unconditionally for DCN1+ For FPO/FAMS, DMCUB will try to change the output timings by writing to the OTG registers. However, the timings written directly to the OTG registers will not be honoured unless the VMIN/VMAX selector registers are programmed with the right bits and the trigger source is selected correctly. The proper solution needs to go into DMCUB, but it will require additional state tracking to ensure that the selectors are set and reset correctly as per driver state. Until the fix is merged into the firmware, apply the workaround in the driver to unconditionally write the OTG vmin/vmax selectors.
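For context on why the selectors matter: FPO/FAMS stretches VTOTAL to lower the effective refresh rate, and a stretched VTOTAL written by DMCUB only takes effect while OTG_V_TOTAL_MIN_SEL/OTG_V_TOTAL_MAX_SEL route the OTG to the min/max registers. A standalone sketch of the underlying timing arithmetic; the clock and timing numbers are illustrative only and not taken from this patch:

#include <stdio.h>

#define PIX_CLK_HZ	594000000ULL	/* illustrative 4k60-class pixel clock */
#define HTOTAL		4400
#define VTOTAL_NOMINAL	2250

/* refresh = pixel_clock / (htotal * vtotal), so stretching VTOTAL lowers
 * the refresh rate without touching the pixel clock.
 */
static unsigned long refresh_hz(unsigned long vtotal)
{
	return (unsigned long)(PIX_CLK_HZ / (HTOTAL * (unsigned long long)vtotal));
}

int main(void)
{
	printf("vtotal %d -> %lu Hz\n", VTOTAL_NOMINAL, refresh_hz(VTOTAL_NOMINAL));
	printf("vtotal %d -> %lu Hz\n", VTOTAL_NOMINAL * 2, refresh_hz(VTOTAL_NOMINAL * 2));
	return 0;
}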
Reviewed-by: Rodrigo Siqueira Signed-off-by: Aurabindo Pillai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 15 +++------------ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 10 ++++++++++ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index e1975991e075..633989fd2514 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -930,19 +930,10 @@ void optc1_set_drr( OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK_EN, 0, OTG_SET_V_TOTAL_MIN_MASK, 0); - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); - - } else { - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_SET_V_TOTAL_MIN_MASK, 0, - OTG_V_TOTAL_MIN_SEL, 0, - OTG_V_TOTAL_MAX_SEL, 0, - OTG_FORCE_LOCK_ON_EVENT, 0); - - optc->funcs->set_vtotal_min_max(optc, 0, 0); } + + // Setup manual flow control for EOF via TRIG_A + optc->funcs->setup_manual_trigger(optc); } void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index d6f095b4555d..58bdbd859bf9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -462,6 +462,16 @@ void optc2_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); + /* Set the min/max selectors unconditionally so that + * DMCUB fw may change OTG timings when necessary + * TODO: Remove the w/a after fixing the issue in DMCUB firmware + */ + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ + REG_SET_8(OTG_TRIGA_CNTL, 0, OTG_TRIGA_SOURCE_SELECT, 21, OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, From 25c30a12d718bd68ad91f58c7546eceaaf0feca5 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 2 Jun 2023 21:14:21 +0530 Subject: [PATCH 793/861] drm/amdgpu: Mark 'kgd_gfx_aldebaran_clear_address_watch' & 'kgd_gfx_v11_clear_address_watch' functions as static MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Below two functions cause a warning because they lack a prototype: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c:164:10: warning: no previous prototype for ‘kgd_gfx_aldebaran_clear_address_watch’ [-Wmissing-prototypes] drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c:782:10: warning: no previous prototype for ‘kgd_gfx_v11_clear_address_watch’ [-Wmissing-prototypes] There are no callers from other files, so just mark them as 'static'. 
Also fixes the following checks: CHECK: Alignment should match open parenthesis +static uint32_t kgd_gfx_aldebaran_clear_address_watch(struct amdgpu_device *adev, uint32_t watch_id) CHECK: Alignment should match open parenthesis +static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev, uint32_t watch_id) Cc: Felix Kuehling Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index efd6a72aab4e..60f9e027fb66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -161,8 +161,8 @@ static uint32_t kgd_gfx_aldebaran_set_address_watch( return watch_address_cntl; } -uint32_t kgd_gfx_aldebaran_clear_address_watch(struct amdgpu_device *adev, - uint32_t watch_id) +static uint32_t kgd_gfx_aldebaran_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) { return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 52efa690a3c2..91c3574ebed3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -779,8 +779,8 @@ static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev, return watch_address_cntl; } -uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev, - uint32_t watch_id) +static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) { return 0; } From 09a77a40b51a979557521d5a2d39e431564d5d23 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 31 May 2023 16:08:50 +0800 Subject: [PATCH 794/861] drm/amdgpu/pm: notify driver unloading to PMFW for SMU v13.0.6 dGPU As requested, follow the same sequence as on APUs and send only PPSMC_MSG_PrepareForDriverUnload to the PMFW during driver unloading.
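A toy model of the resulting control flow, mirroring the hunk below; the names are stand-ins rather than the real SMU API. On enable, only dGPUs run the full feature-control path; on disable, APU and dGPU alike send nothing but the unload notification:

#include <stdbool.h>

struct dev { bool is_apu; };

static int features_control_hw(struct dev *d, bool enable) { (void)d; (void)enable; return 0; }
static void notify_unload(struct dev *d) { (void)d; }

static int system_features_control(struct dev *d, bool enable)
{
	int ret = 0;

	if (enable) {
		if (!d->is_apu)		/* APUs skip the PMFW feature toggle */
			ret = features_control_hw(d, enable);
	} else {
		notify_unload(d);	/* unload notification only, both variants */
	}
	return ret;
}

int main(void)
{
	struct dev dgpu = { .is_apu = false };

	return system_features_control(&dgpu, false);
}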
Signed-off-by: Le Ma Reviewed-by: Shiwu Zhang Reviewed-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 2611241a5ff1..a92ea4601ea4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1413,18 +1413,16 @@ static int smu_v13_0_6_system_features_control(struct smu_context *smu, bool enable) { struct amdgpu_device *adev = smu->adev; - int ret; + int ret = 0; - /* On APUs, notify FW that the device is no longer driver managed */ - if (adev->flags & AMD_IS_APU) { - if (!enable) - smu_v13_0_6_notify_unload(smu); - - return 0; + if (enable) { + if (!(adev->flags & AMD_IS_APU)) + ret = smu_v13_0_system_features_control(smu, enable); + } else { + /* Notify FW that the device is no longer driver managed */ + smu_v13_0_6_notify_unload(smu); } - ret = smu_v13_0_system_features_control(smu, enable); - return ret; } From d522ca2714b77e15ebe6e77c1db7468c11a81180 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Mon, 5 Jun 2023 11:15:34 +0800 Subject: [PATCH 795/861] drm/amd/pm: update smu-driver if header for smu 13.0.0 and smu 13.0.10 update smu-driver if header for smu 13.0.0 and smu 13.0.10 Signed-off-by: Kenneth Feng Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../inc/pmfw_if/smu13_driver_if_v13_0_0.h | 33 ++++++++++++++----- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h index e656e82a0154..9dd1ed5b8940 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h @@ -24,10 +24,10 @@ #ifndef SMU13_DRIVER_IF_V13_0_0_H #define SMU13_DRIVER_IF_V13_0_0_H -#define SMU13_0_0_DRIVER_IF_VERSION 0x32 +#define SMU13_0_0_DRIVER_IF_VERSION 0x3D //Increment this version if SkuTable_t or BoardTable_t change -#define PPTABLE_VERSION 0x26 +#define PPTABLE_VERSION 0x2B #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_SOCCLK_DPM_LEVELS 8 @@ -96,7 +96,7 @@ #define FEATURE_ATHUB_MMHUB_PG_BIT 48 #define FEATURE_SOC_PCC_BIT 49 #define FEATURE_EDC_PWRBRK_BIT 50 -#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_BOMXCO_SVI3_PROG_BIT 51 #define FEATURE_SPARE_52_BIT 52 #define FEATURE_SPARE_53_BIT 53 #define FEATURE_SPARE_54_BIT 54 @@ -312,6 +312,7 @@ typedef enum { I2C_CONTROLLER_PROTOCOL_VR_IR35217, I2C_CONTROLLER_PROTOCOL_TMP_MAX31875, I2C_CONTROLLER_PROTOCOL_INA3221, + I2C_CONTROLLER_PROTOCOL_TMP_MAX6604, I2C_CONTROLLER_PROTOCOL_COUNT, } I2cControllerProtocol_e; @@ -570,6 +571,7 @@ typedef enum { } POWER_SOURCE_e; typedef enum { + MEM_VENDOR_PLACEHOLDER0, MEM_VENDOR_SAMSUNG, MEM_VENDOR_INFINEON, MEM_VENDOR_ELPIDA, @@ -579,7 +581,6 @@ typedef enum { MEM_VENDOR_MOSEL, MEM_VENDOR_WINBOND, MEM_VENDOR_ESMT, - MEM_VENDOR_PLACEHOLDER0, MEM_VENDOR_PLACEHOLDER1, MEM_VENDOR_PLACEHOLDER2, MEM_VENDOR_PLACEHOLDER3, @@ -812,6 +813,9 @@ typedef enum { #define INVALID_BOARD_GPIO 0xFF +#define MARKETING_BASE_CLOCKS 0 +#define MARKETING_GAME_CLOCKS 1 +#define MARKETING_BOOST_CLOCKS 2 typedef struct { //PLL 0 @@ -1102,10 +1106,15 @@ typedef struct { uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 
before SMU exits the DCS throttling phase. uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. + uint8_t FoptEnabled; + uint8_t DcsSpare2[3]; + uint32_t DcsFoptM; //Tuning paramters to shift Fopt calculation + uint32_t DcsFoptB; //Tuning paramters to shift Fopt calculation - uint32_t DcsSpare[16]; + uint32_t DcsSpare[11]; // UCLK section + uint16_t ShadowFreqTableUclk[NUM_UCLK_DPM_LEVELS]; // In MHz uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations uint8_t PaddingMem[3]; @@ -1251,8 +1260,13 @@ typedef struct { QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT]; QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT]; + uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix + uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron + uint16_t TemperatureFwCtfLimit_Hynix; + uint16_t TemperatureFwCtfLimit_Micron; + // SECTION: Sku Reserved - uint32_t Spare[43]; + uint32_t Spare[41]; // Padding for MMHUB - do not modify this uint32_t MmHubPadding[8]; @@ -1324,8 +1338,9 @@ typedef struct { // UCLK Spread Spectrum uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT]; + uint8_t GfxclkSpreadEnable; + // FCLK Spread Spectrum - uint8_t FclkSpreadPadding; uint8_t FclkSpreadPercent; // Q4.4 uint16_t FclkSpreadFreq; // kHz @@ -1450,6 +1465,8 @@ typedef struct { uint8_t ThrottlingPercentage[THROTTLER_COUNT]; + uint8_t VmaxThrottlingPercentage; + uint8_t Padding1[3]; //metrics for D3hot entry/exit and driver ARM msgs uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; @@ -1469,7 +1486,7 @@ typedef struct { typedef struct { SmuMetrics_t SmuMetrics; - uint32_t Spare[30]; + uint32_t Spare[29]; // Padding - ignore uint32_t MmHubPadding[8]; // SMU internal use From 61a7c162399590263bf5f1ff5d2de634bfe3ae8d Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Wed, 29 Mar 2023 22:03:09 +0800 Subject: [PATCH 796/861] drm/amdgpu: pass xcc mask to ras ta Pass the xcc mask to the RAS TA; the RAS TA will compare the mask with the one from the chiplet topology. Changed from V1: Remove IP version checking. Set ras_cmd->ras_in_message.init_flags.xcc_mask directly, since xcc_mask is common to all devices.
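A standalone sketch of what the new field carries. The struct layout mirrors the ta_ras_if.h hunk below; GENMASK32 is a 32-bit reduction of the kernel's GENMASK, and the four-XCC example value is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

struct ras_init_flags_sketch {	/* mirrors struct ta_ras_init_flags */
	uint8_t  poison_mode_en;
	uint8_t  dgpu_mode;
	uint16_t xcc_mask;	/* one bit per accelerated compute die */
};

int main(void)
{
	/* e.g. a part with 4 XCC instances -> bits 0..3 set */
	struct ras_init_flags_sketch f = {
		.poison_mode_en = 1,
		.dgpu_mode = 1,
		.xcc_mask = (uint16_t)GENMASK32(3, 0),
	};

	printf("xcc_mask = 0x%04x\n", (unsigned int)f.xcc_mask);	/* 0x000f */
	return 0;
}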
Signed-off-by: Stanley.Yang Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 ++ drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 789cc16e1be7..3c7c9fca2391 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1655,6 +1655,8 @@ int psp_ras_initialize(struct psp_context *psp) ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; if (!adev->gmc.xgmi.connected_to_cpu) ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; + ras_cmd->ras_in_message.init_flags.xcc_mask = + adev->gfx.xcc_mask; ret = psp_ta_load(psp, &psp->ras_context.context); diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 30d0482ac466..be2984ac00a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -129,6 +129,7 @@ struct ta_ras_trigger_error_input { struct ta_ras_init_flags { uint8_t poison_mode_en; uint8_t dgpu_mode; + uint16_t xcc_mask; }; struct ta_ras_output_flags { From e3959cb5479cd24baf9687734d5591b8e2ee08d4 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Fri, 21 Apr 2023 21:14:36 +0800 Subject: [PATCH 797/861] drm/amdgpu: support check vcn jpeg block mask Support VCN/JPEG instance mask checking; pass the logical mask through directly for all blocks except GFX/SDMA/VCN/JPEG. Changed from V1: correct a typo Signed-off-by: Stanley.Yang Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index f2da69adcd9d..5bd1bdb363ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -356,8 +356,12 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev, case AMDGPU_RAS_BLOCK__SDMA: mask = GENMASK(adev->sdma.num_instances - 1, 0); break; + case AMDGPU_RAS_BLOCK__VCN: + case AMDGPU_RAS_BLOCK__JPEG: + mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0); + break; default: - mask = 0; + mask = inst_mask; break; } From 3898c8fc42b247c26c43c26873895b87fe58b477 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Fri, 21 Apr 2023 20:58:39 +0800 Subject: [PATCH 798/861] drm/amdgpu: convert vcn/jpeg logical mask to physical mask Changed from V1: Remove amdgpu_ras_logical_mask_to_physical_mask, since GET_MASK provides the same feature. Support converting the VCN/JPEG logical mask to a physical mask.
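A standalone sketch of the validation pattern used here: build the mask of instances that actually exist, then drop any user-supplied bits outside it. num_vcn_inst stands in for adev->vcn.num_vcn_inst and is assumed to be at least 1:

#include <stdio.h>

#define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

static unsigned int vcn_jpeg_valid_mask(unsigned int num_vcn_inst)
{
	return GENMASK32(num_vcn_inst - 1, 0);
}

int main(void)
{
	unsigned int valid = vcn_jpeg_valid_mask(2);	/* instances 0 and 1 */
	unsigned int user = 0x5;			/* also asks for instance 2 */

	if (user & ~valid)
		printf("trimming 0x%x -> 0x%x\n", user, user & valid);
	return 0;
}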
Signed-off-by: Stanley.Yang Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 3c7c9fca2391..27559a05a67b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1691,6 +1691,10 @@ int psp_ras_trigger_error(struct psp_context *psp, case TA_RAS_BLOCK__SDMA: dev_mask = GET_MASK(SDMA0, instance_mask); break; + case TA_RAS_BLOCK__VCN: + case TA_RAS_BLOCK__JPEG: + dev_mask = GET_MASK(VCN, instance_mask); + break; default: dev_mask = instance_mask; break; From a15a77c8e61d2db075cc6e2104bfdebf5c9b966c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 2 Jun 2023 14:32:55 -0400 Subject: [PATCH 799/861] Revert "drm/amdgpu: change the reference clock for raven/raven2" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit fbc24293ca16b3b9ef891fe32ccd04735a6f8dc1. This results in inconsistent timing reported via asynchronous GPU queries. Link: https://lists.freedesktop.org/archives/amd-gfx/2023-May/093731.html Cc: Jesse.Zhang@amd.com Cc: michel@daenzer.net Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 135440b5afe9..afcaeadda4c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -325,10 +325,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) return 10000; + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) + return reference_clock / 4; return reference_clock; } From f9bfc9fff2997abe3c1a560a38a0c359775e7ec5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 2 Jun 2023 14:34:12 -0400 Subject: [PATCH 800/861] Revert "drm/amdgpu: Differentiate between Raven2 and Raven/Picasso according to revision id" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 9d2d1827af295fd6971786672c41c4dba3657154. This results in inconsistent timing reported via asynchronous GPU queries. 
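For reference, amdgpu reports xclk in 10 kHz units, so the hardcoded 10000 in the soc15_get_xclk() hunk below means 100 MHz; the CHIP_STONEY fix later in this series relies on the same convention ("vbios says 48Mhz, but the actual freq is 100Mhz"). A trivial sketch of that unit convention:

#include <stdio.h>

/* get_xclk() values are in 10 kHz units */
static unsigned int xclk_to_khz(unsigned int xclk)
{
	return xclk * 10;
}

int main(void)
{
	printf("xclk 10000 -> %u kHz (100 MHz)\n", xclk_to_khz(10000));
	return 0;
}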
Link: https://lists.freedesktop.org/archives/amd-gfx/2023-May/093731.html Cc: Jesse.Zhang@amd.com Cc: michel@daenzer.net Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 35 +++++++++++++++------------ 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f092a1dbdb56..3bc0b100936b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4023,25 +4023,30 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) clock = clock_lo | (clock_hi << 32ULL); break; case IP_VERSION(9, 1, 0): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); + hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); + /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; case IP_VERSION(9, 2, 2): preempt_disable(); - if (adev->rev_id >= 0x8) { - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - } else { - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - } + clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); + hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ + * roughly every 42 seconds. + */ if (hi_check != clock_hi) { - if (adev->rev_id >= 0x8) - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - else - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); clock_hi = hi_check; } preempt_enable(); From 5a03159ab7ef456ba22460e47a9d0eab2f310424 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 2 Jun 2023 14:37:13 -0400 Subject: [PATCH 801/861] Revert "drm/amdgpu: switch to golden tsc registers for raven/raven2" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit f03eb1d26c2739b75580f58bbab4ab2d5d3eba46. This results in inconsistent timing reported via asynchronous GPU queries. 
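The code touched by these reverts reads a free-running 64-bit counter split across two 32-bit registers using the classic hi/lo/hi dance: read the high word, the low word, then the high word again, and retry the low word if a carry slipped in between. A self-contained sketch with a simulated counter that ticks on every register access, so the retry path is actually exercised:

#include <stdint.h>
#include <stdio.h>

/* simulated free-running counter, started just below a 32-bit carry */
static uint64_t sim = 0x00000001fffffffeULL;

static uint32_t read_lo(void) { return (uint32_t)(++sim); }
static uint32_t read_hi(void) { return (uint32_t)(++sim >> 32); }

static uint64_t read_counter64(void)
{
	uint32_t hi = read_hi();
	uint32_t lo = read_lo();
	uint32_t hi_check = read_hi();

	if (hi_check != hi) {	/* the low word wrapped between reads */
		lo = read_lo();
		hi = hi_check;
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("counter = 0x%016llx\n", (unsigned long long)read_counter64());
	return 0;
}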
Link: https://lists.freedesktop.org/archives/amd-gfx/2023-May/093731.html Cc: Jesse.Zhang@amd.com Cc: michel@daenzer.net Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 40 --------------------------- 1 file changed, 40 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3bc0b100936b..4073e2ee7e6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -149,16 +149,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin"); #define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 -#define mmGOLDEN_TSC_COUNT_UPPER_Raven 0x007a -#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX 0 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven 0x007b -#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX 0 - -#define mmGOLDEN_TSC_COUNT_UPPER_Raven2 0x0068 -#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX 0 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven2 0x0069 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX 0 - enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -4022,36 +4012,6 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) preempt_enable(); clock = clock_lo | (clock_hi << 32ULL); break; - case IP_VERSION(9, 1, 0): - preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ - if (hi_check != clock_hi) { - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - clock_hi = hi_check; - } - preempt_enable(); - clock = clock_lo | (clock_hi << 32ULL); - break; - case IP_VERSION(9, 2, 2): - preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ - if (hi_check != clock_hi) { - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - clock_hi = hi_check; - } - preempt_enable(); - clock = clock_lo | (clock_hi << 32ULL); - break; default: amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); From e6850f98efc70277dc0e941e905182738e7327a0 Mon Sep 17 00:00:00 2001 From: Min Li Date: Sat, 3 Jun 2023 15:43:45 +0800 Subject: [PATCH 802/861] drm/radeon: fix race condition UAF in radeon_gem_set_domain_ioctl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Userspace can race to free the gobj (which robj was converted from); robj should not be accessed again after drm_gem_object_put(), otherwise it will result in a use-after-free.
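The bug class in miniature, with illustrative refcounting rather than the real DRM GEM API: after the final put the object may already be freed, so anything still needed must be read beforehand, or taken from a pointer that stays valid (the diff below uses rdev from dev->dev_private instead of robj->rdev):

#include <stdio.h>
#include <stdlib.h>

struct device { int id; };

struct obj {
	int refcount;
	struct device *dev;
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);	/* object may be gone after this */
}

/* Buggy shape: obj_put(o); use(o->dev); -- a use-after-free if another
 * thread dropped the other reference first. Fixed shape: capture before
 * dropping.
 */
static struct device *obj_release_get_dev(struct obj *o)
{
	struct device *dev = o->dev;	/* read first ... */

	obj_put(o);			/* ... then drop the reference */
	return dev;
}

int main(void)
{
	static struct device d = { .id = 1 };
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 1;
	o->dev = &d;
	printf("dev id %d\n", obj_release_get_dev(o)->id);
	return 0;
}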
Reviewed-by: Christian König Signed-off-by: Min Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_gem.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 5de99ffa072f..d0119c5f7eb3 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -459,7 +459,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_set_domain *args = data; struct drm_gem_object *gobj; - struct radeon_bo *robj; int r; /* for now if someone requests domain CPU - @@ -472,13 +471,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, up_read(&rdev->exclusive_lock); return -ENOENT; } - robj = gem_to_radeon_bo(gobj); r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); drm_gem_object_put(gobj); up_read(&rdev->exclusive_lock); - r = radeon_gem_handle_lockup(robj->rdev, r); + r = radeon_gem_handle_lockup(rdev, r); return r; } From 5a863904bab4f5d22012f7d68ab2becafc303a40 Mon Sep 17 00:00:00 2001 From: Chia-I Wu Date: Thu, 1 Jun 2023 14:48:08 -0700 Subject: [PATCH 803/861] drm/amdgpu: fix xclk freq on CHIP_STONEY According to Alex, most APUs from that time seem to have the same issue (vbios says 48Mhz, actual is 100Mhz). I only have a CHIP_STONEY so I limit the fixup to CHIP_STONEY Signed-off-by: Chia-I Wu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 770f2d7a371f..6a8494f98d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; u32 tmp; - if (adev->flags & AMD_IS_APU) - return reference_clock; + if (adev->flags & AMD_IS_APU) { + switch (adev->asic_type) { + case CHIP_STONEY: + /* vbios says 48Mhz, but the actual freq is 100Mhz */ + return 10000; + default: + return reference_clock; + } + } tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) From 869bcf59fd64382e3b23b219e791e6e5ebf1114e Mon Sep 17 00:00:00 2001 From: YiPeng Chai Date: Wed, 24 May 2023 17:14:15 +0800 Subject: [PATCH 804/861] drm/amdgpu: change reserved vram info print MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The link object of mgr->reserved_pages is the blocks variable in struct amdgpu_vram_reservation, not the link variable in struct drm_buddy_block. 
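Why naming the wrong list member corrupts the walk: list_for_each_entry() is container_of() in a loop, and container_of() subtracts the offset of the named member from each node pointer, so iterating reserved_pages via drm_buddy_block.link computes entry pointers with the wrong offset. A self-contained sketch (vram_reservation stands in for amdgpu_vram_reservation):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vram_reservation {
	unsigned long long start;
	unsigned long long size;
	struct list_head blocks;	/* linked into mgr->reserved_pages */
};

int main(void)
{
	struct vram_reservation rsv = { .start = 0x1000, .size = 0x2000 };
	struct list_head *node = &rsv.blocks;
	struct vram_reservation *e;

	/* correct member: subtracts offsetof(..., blocks) and lands on rsv */
	e = container_of(node, struct vram_reservation, blocks);
	printf("%#llx-%#llx: %llu\n", e->start, e->start + e->size, e->size);

	/* naming a member at a different offset would subtract the wrong
	 * amount and yield a pointer into garbage -- the bug fixed here
	 */
	return 0;
}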
Signed-off-by: YiPeng Chai Reviewed-by: Arunpravin Paneer Selvam Reviewed-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 89d35d194f2c..c7085a747b03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -839,7 +839,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; + struct amdgpu_vram_reservation *rsv; drm_printf(printer, " vis usage:%llu\n", amdgpu_vram_mgr_vis_usage(mgr)); @@ -851,8 +851,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, drm_buddy_print(mm, printer); drm_printf(printer, "reserved:\n"); - list_for_each_entry(block, &mgr->reserved_pages, link) - drm_buddy_block_print(mm, block, printer); + list_for_each_entry(rsv, &mgr->reserved_pages, blocks) + drm_printf(printer, "%#018llx-%#018llx: %llu\n", + rsv->start, rsv->start + rsv->size, rsv->size); mutex_unlock(&mgr->lock); } From 49f26218c344741cb3eaa740b1e44e960551a87f Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 16 May 2023 15:50:40 -0400 Subject: [PATCH 805/861] drm/amd/display: fix dcn315 single stream crb allocation This change improves avoidance of asymmetric crb calculations in single-stream scenarios. Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Acked-by: Stylon Wang Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn315/dcn315_resource.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index cb95e978417b..8570bdc292b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1628,6 +1628,10 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) int i; struct resource_context *res_ctx = &context->res_ctx; + /*Don't apply for single stream*/ + if (context->stream_count < 2) + return false; + for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) continue; @@ -1727,19 +1731,23 @@ static int dcn315_populate_dml_pipes_from_context( pipe_cnt++; } - /* Spread remaining unreserved crb evenly among all pipes, use default policy if not enough det or single pipe */ + /* Spread remaining unreserved crb evenly among all pipes*/ if (pixel_rate_crb) { for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) { pipe = &res_ctx->pipe_ctx[i]; if (!pipe->stream) continue; + /* Do not use asymetric crb if not enough for pstate support */ + if (remaining_det_segs < 0) { + pipes[pipe_cnt].pipe.src.det_size_override = 0; + continue; + } + if (!pipe->top_pipe && !pipe->prev_odm_pipe) { bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); - if (remaining_det_segs < 0 || crb_pipes == 1) - pipes[pipe_cnt].pipe.src.det_size_override = 0; if (remaining_det_segs > MIN_RESERVED_DET_SEGS) pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs -
MIN_RESERVED_DET_SEGS) / crb_pipes + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); @@ -1755,6 +1763,7 @@ static int dcn315_populate_dml_pipes_from_context( } /* Convert segments into size for DML use */ pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; + crb_idx++; } pipe_cnt++; From 0baae624630788862bbd654741929007971e9d5b Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 18 May 2023 11:30:44 -0400 Subject: [PATCH 806/861] drm/amd/display: Refactor fast update to use new HWSS build sequence [Description] - Refactor HW sequencer to use a build / execute sequence - Also move gamma updates to become fast v2: squash in build fix ("drm/amd/display: Fix guarding of 'if (dc->debug.visual_confirm)'") Acked-by: Stylon Wang Signed-off-by: Alvin Lee Reviewed-by: Jun Lei Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 274 ++++++++++++++++-- .../drm/amd/display/dc/core/dc_hw_sequencer.c | 255 ++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../amd/display/dc/dce100/dce100_resource.c | 5 + .../amd/display/dc/dce110/dce110_resource.c | 5 + .../amd/display/dc/dce112/dce112_resource.c | 5 + .../amd/display/dc/dce120/dce120_resource.c | 1 + .../drm/amd/display/dc/dce80/dce80_resource.c | 6 + .../drm/amd/display/dc/dcn10/dcn10_resource.c | 1 + .../drm/amd/display/dc/dcn20/dcn20_resource.c | 1 + .../amd/display/dc/dcn201/dcn201_resource.c | 1 + .../drm/amd/display/dc/dcn21/dcn21_resource.c | 1 + .../drm/amd/display/dc/dcn31/dcn31_resource.c | 1 + .../amd/display/dc/dcn315/dcn315_resource.c | 1 + .../amd/display/dc/dcn316/dcn316_resource.c | 1 + .../drm/amd/display/dc/dcn32/dcn32_hwseq.c | 24 ++ .../drm/amd/display/dc/dcn32/dcn32_hwseq.h | 2 + .../gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 1 + .../gpu/drm/amd/display/dc/inc/core_types.h | 11 + .../gpu/drm/amd/display/dc/inc/hw_sequencer.h | 126 ++++++++ 20 files changed, 693 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0e9403f21dae..60da6d834518 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2589,15 +2589,19 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, type); } - if (update_flags->bits.input_csc_change - || update_flags->bits.coeff_reduction_change - || update_flags->bits.lut_3d - || update_flags->bits.gamma_change - || update_flags->bits.gamut_remap_change) { + if (update_flags->bits.lut_3d) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } + if (dc->debug.enable_legacy_fast_update && + (update_flags->bits.gamma_change || + update_flags->bits.gamut_remap_change || + update_flags->bits.input_csc_change || + update_flags->bits.coeff_reduction_change)) { + type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, type); + } return overall_type; } @@ -2630,7 +2634,7 @@ static enum surface_update_type check_update_surfaces_for_stream( stream_update->integer_scaling_update) su_flags->bits.scaling = 1; - if (stream_update->out_transfer_func) + if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; if (stream_update->abm_level) @@ -2661,6 +2665,12 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->output_csc_transform || stream_update->output_color_space) su_flags->bits.out_csc = 1; + + /* Output transfer function changes do 
not require bandwidth recalculation, + * so don't trigger a full update + */ + if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + su_flags->bits.out_tf = 1; } for (i = 0 ; i < surface_count; i++) { @@ -3412,6 +3422,166 @@ void dc_dmub_update_dirty_rect(struct dc *dc, } } +static void build_dmub_update_dirty_rect( + struct dc *dc, + int surface_count, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + struct dc_state *context, + struct dc_dmub_cmd dc_dmub_cmd[], + unsigned int *dmub_cmd_count) +{ + union dmub_rb_cmd cmd; + struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; + unsigned int i, j; + unsigned int panel_inst = 0; + + if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) + return; + + if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) + return; + + memset(&cmd, 0x0, sizeof(cmd)); + cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; + cmd.update_dirty_rect.header.sub_type = 0; + cmd.update_dirty_rect.header.payload_bytes = + sizeof(cmd.update_dirty_rect) - + sizeof(cmd.update_dirty_rect.header); + update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; + + if (!srf_updates[i].surface || !flip_addr) + continue; + /* Do not send in immediate flip mode */ + if (srf_updates[i].surface->flip_immediate) + continue; + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; + update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; + memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, + sizeof(flip_addr->dirty_rects)); + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (pipe_ctx->stream != stream) + continue; + if (pipe_ctx->plane_state != plane_state) + continue; + update_dirty_rect->panel_inst = panel_inst; + update_dirty_rect->pipe_idx = j; + dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; + dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + (*dmub_cmd_count)++; + } + } +} + + +/** + * ************************************************************************************************ + * build_dmub_cmd_list: Build an array of DMCUB commands to be sent to DMCUB + * + * @param [in]: dc: Current DC state + * @param [in]: srf_updates: Array of surface updates + * @param [in]: surface_count: Number of surfaces that have an updated + * @param [in]: stream: Correponding stream to be updated in the current flip + * @param [in]: context: New DC state to be programmed + * + * @param [out]: dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB + * @param [out]: dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array + * + * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required + * to build an array of commands and have them sent while the OTG lock is acquired. 
+ * + * @return: void + * ************************************************************************************************ + */ +static void build_dmub_cmd_list(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_state *context, + struct dc_dmub_cmd dc_dmub_cmd[], + unsigned int *dmub_cmd_count) +{ + // Initialize cmd count to 0 + *dmub_cmd_count = 0; + build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); +} + +static void commit_planes_for_stream_fast(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type, + struct dc_state *context) +{ + int i, j; + struct pipe_ctx *top_pipe_to_program = NULL; + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); + dc_z10_restore(dc); + + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->top_pipe && + !pipe_ctx->prev_odm_pipe && + pipe_ctx->stream && + pipe_ctx->stream == stream) { + top_pipe_to_program = pipe_ctx; + } + } + + if (dc->debug.visual_confirm) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + dc_update_viusal_confirm_color(dc, context, pipe); + } + } + + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *plane_state = srf_updates[i].surface; + /*set logical flag for lock/unlock use*/ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->plane_state) + continue; + if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + continue; + pipe_ctx->plane_state->triplebuffer_flips = false; + if (update_type == UPDATE_TYPE_FAST && + dc->hwss.program_triplebuffer && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; + } + } + } + + build_dmub_cmd_list(dc, + srf_updates, + surface_count, + stream, + context, + context->dc_dmub_cmd, + &(context->dmub_cmd_count)); + hwss_build_fast_sequence(dc, + context->dc_dmub_cmd, + context->dmub_cmd_count, + context->block_sequence, + &(context->block_sequence_steps), + top_pipe_to_program); + hwss_execute_sequence(dc, + context->block_sequence, + context->block_sequence_steps); +} + static void commit_planes_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, @@ -3449,21 +3619,6 @@ static void commit_planes_for_stream(struct dc *dc, } } - if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { - /* Optimize seamless boot flag keeps clocks and watermarks high until - * first flip. After first flip, optimization is required to lower - * bandwidth. Important to note that it is expected UEFI will - * only light up a single display on POST, therefore we only expect - * one stream with seamless boot flag set. 
- */ - if (stream->apply_seamless_boot_optimization) { - stream->apply_seamless_boot_optimization = false; - - if (get_seamless_boot_stream_count(context) == 0) - dc->optimized_required = true; - } - } - if (update_type == UPDATE_TYPE_FULL) { dc_allow_idle_optimizations(dc, false); @@ -4046,6 +4201,43 @@ static bool commit_minimal_transition_state(struct dc *dc, return true; } +/** + * ******************************************************************************* + * update_seamless_boot_flags: Helper function for updating seamless boot flags + * + * @param [in]: dc: Current DC state + * @param [in]: context: New DC state to be programmed + * @param [in]: surface_count: Number of surfaces that have been updated + * @param [in]: stream: Corresponding stream to be updated in the current flip + * + * Updating seamless boot flags does not need to be part of the commit sequence. This + * helper function will update the seamless boot flags on each flip (if required) + * outside of the HW commit sequence (fast or slow). + * + * @return: void + * ******************************************************************************* + */ +static void update_seamless_boot_flags(struct dc *dc, + struct dc_state *context, + int surface_count, + struct dc_stream_state *stream) +{ + if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { + /* Optimize seamless boot flag keeps clocks and watermarks high until + * first flip. After first flip, optimization is required to lower + * bandwidth. Important to note that it is expected UEFI will + * only light up a single display on POST, therefore we only expect + * one stream with seamless boot flag set. + */ + if (stream->apply_seamless_boot_optimization) { + stream->apply_seamless_boot_optimization = false; + + if (get_seamless_boot_stream_count(context) == 0) + dc->optimized_required = true; + } + } +} + bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -4112,14 +4304,25 @@ bool dc_update_planes_and_stream(struct dc *dc, update_type = UPDATE_TYPE_FULL; } - commit_planes_for_stream( - dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); + update_seamless_boot_flags(dc, context, surface_count, stream); + if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } if (dc->current_state != context) { @@ -4244,7 +4447,17 @@ void dc_commit_updates_for_stream(struct dc *dc, TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - commit_planes_for_stream( + update_seamless_boot_flags(dc, context, surface_count, stream); + if (!dc->debug.enable_legacy_fast_update && update_type == UPDATE_TYPE_FAST) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( dc, srf_updates, surface_count, @@ -4252,6 +4465,7 @@ void dc_commit_updates_for_stream(struct dc *dc, stream_update, update_type, context); + } /*update current_State*/ if (dc->current_state != context) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 8a98b8dd008e..182c42c63bc5 100644 ---
a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -27,6 +27,8 @@ #include "core_types.h" #include "timing_generator.h" #include "hw_sequencer.h" +#include "hw_sequencer_private.h" +#include "basics/dc_common.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) @@ -463,6 +465,259 @@ void get_subvp_visual_confirm_color( } } +void hwss_build_fast_sequence(struct dc *dc, + struct dc_dmub_cmd *dc_dmub_cmd, + unsigned int dmub_cmd_count, + struct block_sequence block_sequence[], + int *num_steps, + struct pipe_ctx *pipe_ctx) +{ + struct dc_plane_state *plane = pipe_ctx->plane_state; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dce_hwseq *hws = dc->hwseq; + struct pipe_ctx *current_pipe = NULL; + struct pipe_ctx *current_mpc_pipe = NULL; + unsigned int i = 0; + + *num_steps = 0; // Initialize to 0 + + if (!plane || !stream) + return; + + if (dc->hwss.subvp_pipe_control_lock_fast) { + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; + (*num_steps)++; + } + if (dc->hwss.pipe_control_lock) { + block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; + block_sequence[*num_steps].params.pipe_control_lock_params.lock = true; + block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*num_steps)++; + } + + for (i = 0; i < dmub_cmd_count; i++) { + block_sequence[*num_steps].params.send_dmcub_cmd_params.ctx = dc->ctx; + block_sequence[*num_steps].params.send_dmcub_cmd_params.cmd = &(dc_dmub_cmd[i].dmub_cmd); + block_sequence[*num_steps].params.send_dmcub_cmd_params.wait_type = dc_dmub_cmd[i].wait_type; + block_sequence[*num_steps].func = DMUB_SEND_DMCUB_CMD; + (*num_steps)++; + } + + current_pipe = pipe_ctx; + while (current_pipe) { + current_mpc_pipe = current_pipe; + while (current_mpc_pipe) { + if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; + block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; + (*num_steps)++; + } + if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc; + block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips; + block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; + (*num_steps)++; + } + if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { + block_sequence[*num_steps].params.update_plane_addr_params.dc = dc; + block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR; + (*num_steps)++; + } + + if (hws->funcs.set_input_transfer_func && 
current_mpc_pipe->plane_state->update_flags.bits.gamma_change) { + block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state; + block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; + (*num_steps)++; + } + + if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) { + block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) { + block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_SETUP_DPP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) { + block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; + (*num_steps)++; + } + if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) { + block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_output_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_output_transfer_func_params.stream = current_mpc_pipe->stream; + block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; + (*num_steps)++; + } + + current_mpc_pipe = current_mpc_pipe->bottom_pipe; + } + current_pipe = current_pipe->next_odm_pipe; + } + + if (dc->hwss.pipe_control_lock) { + block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc; + block_sequence[*num_steps].params.pipe_control_lock_params.lock = false; + block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*num_steps)++; + } + if (dc->hwss.subvp_pipe_control_lock_fast) { + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; + (*num_steps)++; + } + + current_pipe = pipe_ctx; + while (current_pipe) { + current_mpc_pipe = current_pipe; + + while (current_mpc_pipe) { + if (!current_mpc_pipe->bottom_pipe && !pipe_ctx->next_odm_pipe && + current_mpc_pipe->stream && current_mpc_pipe->plane_state && + current_mpc_pipe->plane_state->update_flags.bits.addr_update && + !current_mpc_pipe->plane_state->skip_manual_trigger) { + block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; + (*num_steps)++; + } + current_mpc_pipe = current_mpc_pipe->bottom_pipe; + } + current_pipe = current_pipe->next_odm_pipe; + } +} + +void hwss_execute_sequence(struct dc *dc, + struct block_sequence block_sequence[], + int num_steps) +{ + unsigned int i; + union block_sequence_params *params; + struct dce_hwseq *hws = dc->hwseq; + + for (i = 0; i < num_steps; i++) { + params = &(block_sequence[i].params); + switch (block_sequence[i].func) { + + case 
DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST: + dc->hwss.subvp_pipe_control_lock_fast(params); + break; + case OPTC_PIPE_CONTROL_LOCK: + dc->hwss.pipe_control_lock(params->pipe_control_lock_params.dc, + params->pipe_control_lock_params.pipe_ctx, + params->pipe_control_lock_params.lock); + break; + case HUBP_SET_FLIP_CONTROL_GSL: + dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx, + params->set_flip_control_gsl_params.flip_immediate); + break; + case HUBP_PROGRAM_TRIPLEBUFFER: + dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc, + params->program_triplebuffer_params.pipe_ctx, + params->program_triplebuffer_params.enableTripleBuffer); + break; + case HUBP_UPDATE_PLANE_ADDR: + dc->hwss.update_plane_addr(params->update_plane_addr_params.dc, + params->update_plane_addr_params.pipe_ctx); + break; + case DPP_SET_INPUT_TRANSFER_FUNC: + hws->funcs.set_input_transfer_func(params->set_input_transfer_func_params.dc, + params->set_input_transfer_func_params.pipe_ctx, + params->set_input_transfer_func_params.plane_state); + break; + case DPP_PROGRAM_GAMUT_REMAP: + dc->hwss.program_gamut_remap(params->program_gamut_remap_params.pipe_ctx); + break; + case DPP_SETUP_DPP: + hwss_setup_dpp(params); + break; + case DPP_PROGRAM_BIAS_AND_SCALE: + hwss_program_bias_and_scale(params); + break; + case OPTC_PROGRAM_MANUAL_TRIGGER: + hwss_program_manual_trigger(params); + break; + case DPP_SET_OUTPUT_TRANSFER_FUNC: + hws->funcs.set_output_transfer_func(params->set_output_transfer_func_params.dc, + params->set_output_transfer_func_params.pipe_ctx, + params->set_output_transfer_func_params.stream); + break; + case MPC_UPDATE_VISUAL_CONFIRM: + dc->hwss.update_visual_confirm_color(params->update_visual_confirm_params.dc, + params->update_visual_confirm_params.pipe_ctx, + params->update_visual_confirm_params.mpcc_id); + break; + case DMUB_SEND_DMCUB_CMD: + hwss_send_dmcub_cmd(params); + break; + default: + ASSERT(false); + break; + } + } +} + +void hwss_send_dmcub_cmd(union block_sequence_params *params) +{ + struct dc_context *ctx = params->send_dmcub_cmd_params.ctx; + union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd; + enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type; + + dm_execute_dmub_cmd(ctx, cmd, wait_type); +} + +void hwss_program_manual_trigger(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx; + + if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) + pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); +} + +void hwss_setup_dpp(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->setup_dpp_params.pipe_ctx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + + if (dpp && dpp->funcs->dpp_setup) { + // program the input csc + dpp->funcs->dpp_setup(dpp, + plane_state->format, + EXPANSION_MODE_ZERO, + plane_state->input_csc_color_matrix, + plane_state->color_space, + NULL); + } +} + +void hwss_program_bias_and_scale(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct dc_bias_and_scale bns_params = {0}; + + //TODO :for CNVC set scale and bias registers if necessary + build_prescale_params(&bns_params, plane_state); + if (dpp->funcs->dpp_program_bias_and_scale) + 
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); +} + void get_mclk_switch_visual_confirm_color( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7fd9f5a9e191..7ded574f84ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -896,6 +896,7 @@ struct dc_debug_options { bool disable_dp_plus_plus_wa; uint32_t fpo_vactive_min_active_margin_us; uint32_t fpo_vactive_max_blank_us; + bool enable_legacy_fast_update; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 54805802cbd5..42e9b6a529f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -401,6 +401,10 @@ static const struct dc_plane_cap plane_cap = { } }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + #define CTX ctx #define REG(reg) mm ## reg @@ -1071,6 +1075,7 @@ static bool dce100_resource_construct( dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index a4a45a6ce61e..46eca5a21e1c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -424,6 +424,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .per_pixel_alpha = 1, @@ -1368,6 +1372,7 @@ static bool dce110_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index e179e80667d1..808855886183 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -429,6 +429,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + #define CTX ctx #define REG(reg) mm ## reg @@ -1239,6 +1243,7 @@ static bool dce112_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index af631085e88c..18c5a86d2d61 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -526,6 +526,7 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults = { .disable_clock_gate = true, + .enable_legacy_fast_update = true, }; static struct clock_source 
*dce120_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 5825e6f412bd..3935fd455f0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -418,6 +418,10 @@ static const struct dc_plane_cap plane_cap = { } }; +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dce_dmcu_registers dmcu_regs = { DMCU_DCE80_REG_LIST() }; @@ -969,6 +973,7 @@ static bool dce80_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; /************************************************* * Create resources * @@ -1369,6 +1374,7 @@ static bool dce83_construct( dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; + dc->debug = debug_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 26ddf73fd5b1..4b02f8443534 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -553,6 +553,7 @@ static const struct dc_debug_options debug_defaults_drv = { .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 6ef7e2634991..4cc8de2627ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -722,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; void dcn20_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 5ff09bf4bc30..fdba8a9f5c30 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -613,6 +613,7 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = false, + .enable_legacy_fast_update = true, }; static void dcn201_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index da6d42de0554..d693ea42d033 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -653,6 +653,7 @@ static const struct dc_debug_options debug_defaults_drv = { .usbc_combo_phy_reset_wa = true, .dmub_command_table = true, .use_max_lb = true, + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index f2d589c505a8..fc33b5fcabe1 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -887,6 +887,7 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .disable_z10 = true, + .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index 8570bdc292b4..0cc853964781 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -887,6 +887,7 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index b6e0aa2ab27a..707cf28bbceb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -884,6 +884,7 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 7f5cd8c8d49b..3f11992e380b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -409,6 +409,30 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, } } +void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) +{ + struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc; + bool lock = params->subvp_pipe_control_lock_fast_params.lock; + struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx; + bool subvp_immediate_flip = false; + + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { + if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && + pipe_ctx->plane_state->flip_immediate) + subvp_immediate_flip = true; + } + + // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. 
+ if (subvp_immediate_flip) { + union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; + + hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; + hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; + hw_lock_cmd.bits.lock = lock; + hw_lock_cmd.bits.should_release = !lock; + dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); + } +} bool dcn32_set_mpc_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h index 6dbe929cf599..2d2628f31bed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h @@ -84,6 +84,8 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, struct pipe_ctx *top_pipe_to_program, bool subvp_prev_use); +void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params); + void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 8356b31e1d9a..c2490e16a66a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .enable_phantom_streams = dcn32_enable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d8dd143cf6ea..034610b74a37 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -37,6 +37,7 @@ #include "dwb.h" #include "mcif_wb.h" #include "panel_cntl.h" +#include "dmub/inc/dmub_cmd.h" #define MAX_CLOCK_SOURCES 7 #define MAX_SVP_PHANTOM_STREAMS 2 @@ -499,6 +500,11 @@ struct bw_context { struct display_mode_lib dml; }; +struct dc_dmub_cmd { + union dmub_rb_cmd dmub_cmd; + enum dm_dmub_wait_type wait_type; +}; + /** * struct dc_state - The full description of a state requested by users */ @@ -547,6 +553,11 @@ struct dc_state { */ struct bw_context bw_ctx; + struct block_sequence block_sequence[50]; + unsigned int block_sequence_steps; + struct dc_dmub_cmd dc_dmub_cmd[10]; + unsigned int dmub_cmd_count; + /** * @refcount: refcount reference * diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index df160c6a630c..cc0a3a992f7b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -44,6 +44,112 @@ struct dc_virtual_addr_space_config; struct dpp; struct dce_hwseq; struct link_resource; +struct dc_dmub_cmd; + +struct subvp_pipe_control_lock_fast_params { + struct dc *dc; + bool lock; + struct pipe_ctx *pipe_ctx; +}; + +struct pipe_control_lock_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + bool lock; +}; + +struct set_flip_control_gsl_params { + struct pipe_ctx *pipe_ctx; + bool flip_immediate; +}; + +struct program_triplebuffer_params { + const struct dc *dc; + struct pipe_ctx *pipe_ctx; + bool enableTripleBuffer; +}; + 
+struct update_plane_addr_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_input_transfer_func_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_plane_state *plane_state; +}; + +struct program_gamut_remap_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_manual_trigger_params { + struct pipe_ctx *pipe_ctx; +}; + +struct send_dmcub_cmd_params { + struct dc_context *ctx; + union dmub_rb_cmd *cmd; + enum dm_dmub_wait_type wait_type; +}; + +struct setup_dpp_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_bias_and_scale_params { + struct pipe_ctx *pipe_ctx; +}; + +struct set_output_transfer_func_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + const struct dc_stream_state *stream; +}; + +struct update_visual_confirm_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + int mpcc_id; +}; + +union block_sequence_params { + struct update_plane_addr_params update_plane_addr_params; + struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params; + struct pipe_control_lock_params pipe_control_lock_params; + struct set_flip_control_gsl_params set_flip_control_gsl_params; + struct program_triplebuffer_params program_triplebuffer_params; + struct set_input_transfer_func_params set_input_transfer_func_params; + struct program_gamut_remap_params program_gamut_remap_params; + struct program_manual_trigger_params program_manual_trigger_params; + struct send_dmcub_cmd_params send_dmcub_cmd_params; + struct setup_dpp_params setup_dpp_params; + struct program_bias_and_scale_params program_bias_and_scale_params; + struct set_output_transfer_func_params set_output_transfer_func_params; + struct update_visual_confirm_params update_visual_confirm_params; +}; + +enum block_sequence_func { + DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST = 0, + OPTC_PIPE_CONTROL_LOCK, + HUBP_SET_FLIP_CONTROL_GSL, + HUBP_PROGRAM_TRIPLEBUFFER, + HUBP_UPDATE_PLANE_ADDR, + DPP_SET_INPUT_TRANSFER_FUNC, + DPP_PROGRAM_GAMUT_REMAP, + OPTC_PROGRAM_MANUAL_TRIGGER, + DMUB_SEND_DMCUB_CMD, + DPP_SETUP_DPP, + DPP_PROGRAM_BIAS_AND_SCALE, + DPP_SET_OUTPUT_TRANSFER_FUNC, + MPC_UPDATE_VISUAL_CONFIRM, +}; + +struct block_sequence { + union block_sequence_params params; + enum block_sequence_func func; +}; struct hw_sequencer_funcs { void (*hardware_release)(struct dc *dc); @@ -252,6 +358,7 @@ struct hw_sequencer_funcs { const struct tg_color *solid_color, int width, int height, int offset); + void (*subvp_pipe_control_lock_fast)(union block_sequence_params *params); void (*z10_restore)(const struct dc *dc); void (*z10_save_init)(struct dc *dc); @@ -313,4 +420,23 @@ void get_mclk_switch_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void hwss_execute_sequence(struct dc *dc, + struct block_sequence block_sequence[], + int num_steps); + +void hwss_build_fast_sequence(struct dc *dc, + struct dc_dmub_cmd *dc_dmub_cmd, + unsigned int dmub_cmd_count, + struct block_sequence block_sequence[], + int *num_steps, + struct pipe_ctx *pipe_ctx); + +void hwss_send_dmcub_cmd(union block_sequence_params *params); + +void hwss_program_manual_trigger(union block_sequence_params *params); + +void hwss_setup_dpp(union block_sequence_params *params); + +void hwss_program_bias_and_scale(union block_sequence_params *params); + #endif /* __DC_HW_SEQUENCER_H__ */ From 490ddccb84fe2f6165b2bdd2d00fd4ab593b95ec Mon Sep 17 00:00:00 2001 From: Saaem Rizvi Date: Thu, 18 May 2023 12:12:20 -0400 Subject: [PATCH 807/861] drm/amd/display: Wrong index type for pipe iterator [Why and 
How] Type mismatch in index and pipe count might cause an infinite loop. This code change resolves the issue. Acked-by: Stylon Wang Signed-off-by: Saaem Rizvi Reviewed-by: Josip Pavic Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 46b6f4f9e1fd..ce7e6f20b31f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -392,7 +392,7 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context) { - uint8_t i; + unsigned int i; struct pipe_ctx *pipe = NULL; bool otg_disabled[MAX_PIPES] = {false}; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 3f11992e380b..00f32ffe0079 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -1201,7 +1201,7 @@ void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context) { - uint8_t i; + unsigned int i; struct pipe_ctx *pipe = NULL; bool otg_disabled[MAX_PIPES] = {false}; From 8e7b3f5435b3c0751515c973972ebb11e0fc0fb5 Mon Sep 17 00:00:00 2001 From: Max Tseng Date: Tue, 25 Apr 2023 15:05:17 +0800 Subject: [PATCH 808/861] drm/amd/display: Add control flag to dc_stream_state to skip eDP BL off/link off Add control flag to dc_stream_state to skip eDP BL off/link off.
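For illustration, a minimal sketch of how a caller might use the new flag; the helper name and call site below are assumptions of this sketch, not part of the patch:

/* Hypothetical caller: keep the eDP panel powered across a stream
 * reconfiguration. With skip_edp_power_down set, dce110_blank_stream()
 * leaves the backlight untouched and link_set_dpms_off() skips
 * disable_link(), as the hunks below show.
 */
static void dm_keep_edp_powered(struct dc_stream_state *stream)
{
	stream->skip_edp_power_down = true;
}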
Acked-by: Stylon Wang Signed-off-by: Max Tseng Reviewed-by: Anthony Koo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 + .../gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ++- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 7 ++++--- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index d5b3e3a32cc6..3697ea1d14c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -302,6 +302,7 @@ struct dc_stream_state { bool vblank_synchronized; bool fpo_in_use; struct mall_stream_config mall_stream_config; + bool skip_edp_power_down; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index d4cacb8df631..6c9ca43d1040 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1216,7 +1216,8 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) struct dce_hwseq *hws = link->dc->hwseq; if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - hws->funcs.edp_backlight_control(link, false); + if (!stream->skip_edp_power_down) + hws->funcs.edp_backlight_control(link, false); link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index f7f1a1586f3b..1a7b93e41e35 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2207,9 +2207,8 @@ static enum dc_status enable_link( * link settings. Need to call disable first before enabling at * new link settings. 
*/ - if (link->link_status.link_active) { + if (link->link_status.link_active && !stream->skip_edp_power_down) disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - } switch (pipe_ctx->stream->signal) { case SIGNAL_TYPE_DISPLAY_PORT: @@ -2327,7 +2326,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) dc->hwss.disable_stream(pipe_ctx); } else { dc->hwss.disable_stream(pipe_ctx); - disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + if (!pipe_ctx->stream->skip_edp_power_down) { + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + } } if (pipe_ctx->stream->timing.flags.DSC) { From bbd069a860b78a087d20d91656a5026c0196586b Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 19 May 2023 11:38:15 -0400 Subject: [PATCH 809/861] drm/amd/display: Reduce sdp bw after urgent to 90% [Description] Reduce expected SDP bandwidth due to poor QoS and arbitration issues on high bandwidth configs Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Acked-by: Stylon Wang Signed-off-by: Alvin Lee Reviewed-by: Nevenko Stupar Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 7e03c844b05e..ad6ee48580f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -147,7 +147,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_sdp_bw_after_urgent = 100.0, + .pct_ideal_sdp_bw_after_urgent = 90.0, .pct_ideal_fabric_bw_after_urgent = 67.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented From 35c7b59e3691cbea91c8b91e8ec4b0a4a960dd5a Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Tue, 6 Jun 2023 17:36:45 +0530 Subject: [PATCH 810/861] =?UTF-8?q?drm/amd/display:=20Fix=20unused=20varia?= =?UTF-8?q?ble=20=E2=80=98should=5Flock=5Fall=5Fpipes=E2=80=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below compilation error: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:3524:7: error: unused variable 'should_lock_all_pipes' [-Werror,-Wunused-variable] bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); Cc: Rodrigo Siqueira Cc: Aurabindo Pillai Cc: Harry Wentland Cc: Alex Deucher Reviewed-by: Hamza Mahfooz Signed-off-by: Srinivasan Shanmugam Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 60da6d834518..be72e03cdf92 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3521,7 +3521,6 @@ static void commit_planes_for_stream_fast(struct dc *dc, { int i, j; struct pipe_ctx *top_pipe_to_program = NULL; - bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); dc_z10_restore(dc); for (j = 0; j < dc->res_pool->pipe_count; j++) { From caf0f98dc280b5426b1858d4e70130aa3dd9679c Mon Sep 17 00:00:00 
2001 From: Hamza Mahfooz Date: Mon, 5 Jun 2023 13:59:08 -0400 Subject: [PATCH 811/861] drm/amd/display: mark dml31's UseMinimumDCFCLK() as noinline_for_stack clang reports: drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn31/display_mode_vba_31.c:3797:6: error: stack frame size (2632) exceeds limit (2048) in 'dml31_ModeSupportAndSystemConfigurationFull' [-Werror,-Wframe-larger-than] 3797 | void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) | ^ 1 error generated. So, since UseMinimumDCFCLK() consumes a lot of stack space, mark it as noinline_for_stack to prevent it from blowing up dml31_ModeSupportAndSystemConfigurationFull()'s stack size. Acked-by: Alex Deucher Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 01603abd75bb..43016c462251 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -7032,7 +7032,7 @@ static double CalculateUrgentLatency( return ret; } -static void UseMinimumDCFCLK( +static noinline_for_stack void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, int MaxPrefetchMode, int ReorderingBytes) From 82054942472745c2caa4f6b31b4174401348b887 Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Mon, 5 Jun 2023 14:18:49 -0400 Subject: [PATCH 812/861] drm/amd/display: mark dml314's UseMinimumDCFCLK() as noinline_for_stack clang reports: drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn314/display_mode_vba_314.c:3892:6: error: stack frame size (2632) exceeds limit (2048) in 'dml314_ModeSupportAndSystemConfigurationFull' [-Werror,-Wframe-larger-than] 3892 | void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) | ^ 1 error generated. So, since UseMinimumDCFCLK() consumes a lot of stack space, mark it as noinline_for_stack to prevent it from blowing up dml314_ModeSupportAndSystemConfigurationFull()'s stack size. Acked-by: Alex Deucher Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 27b83162ae45..1532a7e0ed6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -7061,7 +7061,7 @@ static double CalculateUrgentLatency( return ret; } -static void UseMinimumDCFCLK( +static noinline_for_stack void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, int MaxPrefetchMode, int ReorderingBytes) From d155cfff48499d1e973976519ca81a7d9bab2cc3 Mon Sep 17 00:00:00 2001 From: Sui Jingfeng Date: Tue, 6 Jun 2023 21:33:28 +0800 Subject: [PATCH 813/861] drm/amdgpu: display/Kconfig: replace leading spaces with tab This patch replaces the leading spaces with tabs, keeping them aligned with the rest of the config options. No functional change.
Signed-off-by: Sui Jingfeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index b015c7d19531..4e99b8836827 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -42,16 +42,13 @@ config DEBUG_KERNEL_DC Choose this option if you want to hit kdgb_break in assert. config DRM_AMD_SECURE_DISPLAY - bool "Enable secure display support" - depends on DEBUG_FS - depends on DRM_AMD_DC_FP - help - Choose this option if you want to - support secure display - - This option enables the calculation - of crc of specific region via debugfs. - Cooperate with specific DMCU FW. + bool "Enable secure display support" + depends on DEBUG_FS + depends on DRM_AMD_DC_FP + help + Choose this option if you want to support secure display + This option enables the calculation of crc of specific region via + debugfs. Cooperate with specific DMCU FW. endmenu From b7588507152148eaf0f19feb98c65b72ab40a726 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 6 Jun 2023 11:33:46 +0300 Subject: [PATCH 814/861] drm/amd/pm: Fix some memory corruption The "od_table" is a pointer to a large struct, but this code is doing pointer math as if it were pointing to bytes. It results in writing far outside the struct. Fixes: 2e8452ea4ef6 ("drm/amd/pm: fulfill the OD support for SMU13.0.0") Fixes: 2a9aa52e4617 ("drm/amd/pm: fulfill the OD support for SMU13.0.7") Reviewed-by: Evan Quan Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 4 ++-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 5ac5ea770c1c..413e592f0ed6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1535,7 +1535,7 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, * settings. Thus we do not cache it. */ offset_of_featurectrlmask = offsetof(OverDriveTable_t, FeatureCtrlMask); - if (memcmp(od_table + offset_of_featurectrlmask, + if (memcmp((u8 *)od_table + offset_of_featurectrlmask, table_context->user_overdrive_table + offset_of_featurectrlmask, sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask)) { smu_v13_0_0_dump_od_table(smu, od_table); @@ -1548,7 +1548,7 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask = 0; memcpy(table_context->user_overdrive_table + offset_of_featurectrlmask, - od_table + offset_of_featurectrlmask, + (u8 *)od_table + offset_of_featurectrlmask, sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask); if (!memcmp(table_context->user_overdrive_table, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 0bd086360efa..cda4e818aab7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1524,7 +1524,7 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, * settings. Thus we do not cache it.
*/ offset_of_featurectrlmask = offsetof(OverDriveTable_t, FeatureCtrlMask); - if (memcmp(od_table + offset_of_featurectrlmask, + if (memcmp((u8 *)od_table + offset_of_featurectrlmask, table_context->user_overdrive_table + offset_of_featurectrlmask, sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask)) { smu_v13_0_7_dump_od_table(smu, od_table); @@ -1537,7 +1537,7 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask = 0; memcpy(table_context->user_overdrive_table + offset_of_featurectrlmask, - od_table + offset_of_featurectrlmask, + (u8 *)od_table + offset_of_featurectrlmask, sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask); if (!memcmp(table_context->user_overdrive_table, From 33e82119cfb2a957f250f92a1e4c4db2b06400db Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Tue, 6 Jun 2023 11:42:53 -0400 Subject: [PATCH 815/861] drm/amd/display: Only use ODM2:1 policy for high pixel rate displays We only gain a benefit of using the ODM2:1 dynamic policy if it allows us to decrease DISPCLK to use the VMIN freq. If the display config can already achieve VMIN DISPCLK freq without ODM2:1, don't apply the policy. This patch was reverted but that causes some IGT regressions. To unblock, the patch is being applied again until IGT failures are fixed. Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Signed-off-by: Aurabindo Pillai Reviewed-by: Rodrigo Siqueira --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 8d68f02f5147..2e6b39fe2613 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1918,6 +1918,7 @@ int dcn32_populate_dml_pipes_from_context( context->stream_status[0].plane_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) && is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) && + pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ && dc->debug.enable_single_display_2to1_odm_policy && !vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 80bebdf43eca..2f34f01b3ea1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -40,6 +40,7 @@ #define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq #define SUBVP_HIGH_REFRESH_LIST_LEN 3 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 +#define DCN3_2_VMIN_DISPCLK_HZ 717000000 #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) From 8be295046748432c53a2dee39c469f63c60b0ec3 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 6 Jun 2023 19:29:51 -0400 Subject: [PATCH 816/861] drm/amdkfd: potential error pointer dereference in ioctl The "target" either comes from kfd_create_process() which returns error pointers on error or kfd_lookup_process_by_pid() which returns NULL on error. So we need to check for both types of errors.
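A condensed sketch of the resulting pattern (the from_same_process condition stands in for the ioctl's actual PID check and is an assumption of this sketch):

/* kfd_create_process() encodes failure as ERR_PTR(),
 * kfd_lookup_process_by_pid() encodes failure as NULL,
 * so both encodings must be handled before dereferencing.
 */
struct kfd_process *target;

target = from_same_process ? kfd_create_process(current)
			   : kfd_lookup_process_by_pid(pid);
if (IS_ERR_OR_NULL(target))
	return target ? PTR_ERR(target) : -ESRCH;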
Fixes: 0ab2d7532b05 ("drm/amdkfd: prepare per-process debug enable and disable") Signed-off-by: Dan Carpenter Reviewed-by: Jonathan Kim Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index cce2abe12e1b..d655c5bc951f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2920,9 +2920,9 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v target = kfd_lookup_process_by_pid(pid); } - if (!target) { + if (IS_ERR_OR_NULL(target)) { pr_debug("Cannot find process PID %i to debug\n", args->pid); - r = -ESRCH; + r = target ? PTR_ERR(target) : -ESRCH; goto out; } From f2bcc0c7db0c004f0184675e7862648e8aa197f9 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Tue, 6 Jun 2023 14:27:04 +0800 Subject: [PATCH 817/861] drm/amdgpu/mmsch: Correct the definition for mmsch init header The header is version specific, so it shouldn't use AMDGPU_MAX_VCN_INSTANCES. Signed-off-by: Emily Deng Reviewed-by: Feifei Xu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h | 4 +++- drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h | 4 +++- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 2 +- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h index 3e4e858a6965..a773ef61b78c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h @@ -30,6 +30,8 @@ #define MMSCH_VERSION_MINOR 0 #define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) +#define MMSCH_V3_0_VCN_INSTANCES 0x2 + enum mmsch_v3_0_command_type { MMSCH_COMMAND__DIRECT_REG_WRITE = 0, MMSCH_COMMAND__DIRECT_REG_POLLING = 2, @@ -47,7 +49,7 @@ struct mmsch_v3_0_table_info { struct mmsch_v3_0_init_header { uint32_t version; uint32_t total_size; - struct mmsch_v3_0_table_info inst[AMDGPU_MAX_VCN_INSTANCES]; + struct mmsch_v3_0_table_info inst[MMSCH_V3_0_VCN_INSTANCES]; }; struct mmsch_v3_0_cmd_direct_reg_header { diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h index 83653a50a1a2..796d4f8791e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h @@ -43,6 +43,8 @@ #define MMSCH_VF_MAILBOX_RESP__OK 0x1 #define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2 +#define MMSCH_V4_0_VCN_INSTANCES 0x2 + enum mmsch_v4_0_command_type { MMSCH_COMMAND__DIRECT_REG_WRITE = 0, MMSCH_COMMAND__DIRECT_REG_POLLING = 2, @@ -60,7 +62,7 @@ struct mmsch_v4_0_table_info { struct mmsch_v4_0_init_header { uint32_t version; uint32_t total_size; - struct mmsch_v4_0_table_info inst[AMDGPU_MAX_VCN_INSTANCES]; + struct mmsch_v4_0_table_info inst[MMSCH_V4_0_VCN_INSTANCES]; struct mmsch_v4_0_table_info jpegdec; }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 70fefbf26c48..c8f63b3c6f69 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1313,7 +1313,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) header.version = MMSCH_VERSION; header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2; - for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) { + for (i = 0; i < MMSCH_V3_0_VCN_INSTANCES; i++) { header.inst[i].init_status = 0; header.inst[i].table_offset
= 0; header.inst[i].table_size = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 60c3fd20e8ce..8d371faaa2b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -1239,7 +1239,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) header.version = MMSCH_VERSION; header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2; - for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) { + for (i = 0; i < MMSCH_V4_0_VCN_INSTANCES; i++) { header.inst[i].init_status = 0; header.inst[i].table_offset = 0; header.inst[i].table_size = 0; From 8ff865be93e642d0ad66ca7369f42fbe36dc6a90 Mon Sep 17 00:00:00 2001 From: Jiadong Zhu Date: Thu, 25 May 2023 16:52:55 +0800 Subject: [PATCH 818/861] drm/amdgpu: Modify indirect buffer packages for resubmission When the preempted IB frame is resubmitted to the CP, we need to modify the frame data, including: 1. set PRE_RESUME 1 in CONTEXT_CONTROL. 2. use meta data (DE and CE) read from the CSA in WRITE_DATA. Add functions to save the locations the first time the IBs are emitted, and a callback to patch the packages when resubmission happens. Signed-off-by: Jiadong Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 18 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 9 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c | 60 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h | 15 +++++ 4 files changed, 102 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index d8a78de1f335..da26c555af24 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -670,3 +670,21 @@ void amdgpu_ring_ib_end(struct amdgpu_ring *ring) if (ring->is_sw_ring) amdgpu_sw_ring_ib_end(ring); } + +void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL); +} + +void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE); +} + +void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4a4c9f89c302..21ffb9ce32ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -229,6 +229,9 @@ struct amdgpu_ring_funcs { int (*preempt_ib)(struct amdgpu_ring *ring); void (*emit_mem_sync)(struct amdgpu_ring *ring); void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable); + void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset); + void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset); + void (*patch_de)(struct amdgpu_ring *ring, unsigned offset); }; struct amdgpu_ring { @@ -323,11 +326,17 @@ struct amdgpu_ring { #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r) +#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o))) +#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o))) +#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o))) unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type); int amdgpu_ring_alloc(struct amdgpu_ring *ring,
unsigned ndw); void amdgpu_ring_ib_begin(struct amdgpu_ring *ring); void amdgpu_ring_ib_end(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring); void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c index 62079f0e3ee8..73516abef662 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c @@ -105,6 +105,16 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux) amdgpu_fence_update_start_timestamp(e->ring, chunk->sync_seq, ktime_get()); + if (chunk->sync_seq == + le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) { + if (chunk->cntl_offset <= e->ring->buf_mask) + amdgpu_ring_patch_cntl(e->ring, + chunk->cntl_offset); + if (chunk->ce_offset <= e->ring->buf_mask) + amdgpu_ring_patch_ce(e->ring, chunk->ce_offset); + if (chunk->de_offset <= e->ring->buf_mask) + amdgpu_ring_patch_de(e->ring, chunk->de_offset); + } amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring, chunk->start, chunk->end); @@ -407,6 +417,17 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring) amdgpu_ring_mux_end_ib(mux, ring); } +void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ring_mux *mux = &adev->gfx.muxer; + unsigned offset; + + offset = ring->wptr & ring->buf_mask; + + amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type); +} + void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) { struct amdgpu_mux_entry *e; @@ -429,6 +450,10 @@ void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *r } chunk->start = ring->wptr; + /* the initialized value used to check if they are set by the ib submission*/ + chunk->cntl_offset = ring->buf_mask + 1; + chunk->de_offset = ring->buf_mask + 1; + chunk->ce_offset = ring->buf_mask + 1; list_add_tail(&chunk->entry, &e->list); } @@ -454,6 +479,41 @@ static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct a } } +void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, + struct amdgpu_ring *ring, u64 offset, + enum amdgpu_ring_mux_offset_type type) +{ + struct amdgpu_mux_entry *e; + struct amdgpu_mux_chunk *chunk; + + e = amdgpu_ring_mux_sw_entry(mux, ring); + if (!e) { + DRM_ERROR("cannot find entry!\n"); + return; + } + + chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry); + if (!chunk) { + DRM_ERROR("cannot find chunk!\n"); + return; + } + + switch (type) { + case AMDGPU_MUX_OFFSET_TYPE_CONTROL: + chunk->cntl_offset = offset; + break; + case AMDGPU_MUX_OFFSET_TYPE_DE: + chunk->de_offset = offset; + break; + case AMDGPU_MUX_OFFSET_TYPE_CE: + chunk->ce_offset = offset; + break; + default: + DRM_ERROR("invalid type (%d)\n", type); + break; + } +} + void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) { struct amdgpu_mux_entry *e; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h index 4be45fc14954..b22d4fb2a847 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h @@ -50,6 +50,12 @@ struct amdgpu_mux_entry { struct list_head list; 
}; +enum amdgpu_ring_mux_offset_type { + AMDGPU_MUX_OFFSET_TYPE_CONTROL, + AMDGPU_MUX_OFFSET_TYPE_DE, + AMDGPU_MUX_OFFSET_TYPE_CE, +}; + struct amdgpu_ring_mux { struct amdgpu_ring *real_ring; @@ -72,12 +78,18 @@ struct amdgpu_ring_mux { * @sync_seq: the fence seqno related with the saved IB. * @start:- start location on the software ring. * @end:- end location on the software ring. + * @cntl_offset:- the PRE_RESUME bit position used for resubmission. + * @de_offset:- the anchor in write_data for de meta of resubmission. + * @ce_offset:- the anchor in write_data for ce meta of resubmission. */ struct amdgpu_mux_chunk { struct list_head entry; uint32_t sync_seq; u64 start; u64 end; + u64 cntl_offset; + u64 de_offset; + u64 ce_offset; }; int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, @@ -89,6 +101,8 @@ u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ri u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); +void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, + u64 offset, enum amdgpu_ring_mux_offset_type type); bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux); u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring); @@ -97,6 +111,7 @@ void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring); void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring); void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring); +void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type); const char *amdgpu_sw_ring_name(int idx); unsigned int amdgpu_sw_ring_priority(int idx); From ea791e704b97ab5abd563b6d2f88c4019940079e Mon Sep 17 00:00:00 2001 From: Jiadong Zhu Date: Thu, 25 May 2023 18:42:15 +0800 Subject: [PATCH 819/861] drm/amdgpu: Implement gfx9 patch functions for resubmission Patch the packets including CONTEXT_CONTROL and WRITE_DATA for gfx9 during the resubmission scenario.
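For illustration, the heart of these patch functions is a dword copy into the software ring that must split at the wrap point. A stand-alone sketch of that copy (hypothetical ring_patch_payload() helper, not the driver code itself; the ring holds buf_mask + 1 dwords):

#include <stdint.h>
#include <string.h>

/* Wrap-aware copy: a payload landing near the end of a power-of-two
 * ring is split into a tail piece and a piece starting at index 0.
 */
static void ring_patch_payload(uint32_t *ring, uint32_t buf_mask,
			       uint32_t offset, const void *payload,
			       size_t payload_size)
{
	size_t ring_dw = (size_t)buf_mask + 1;
	size_t head_dw = ring_dw - offset;	/* dwords before the wrap */

	if (offset + (payload_size >> 2) <= ring_dw) {
		memcpy(&ring[offset], payload, payload_size);
	} else {
		memcpy(&ring[offset], payload, head_dw << 2);
		memcpy(&ring[0], (const uint8_t *)payload + (head_dw << 2),
		       payload_size - (head_dw << 2));
	}
}

The ce/de meta patch functions in the diff below follow exactly this shape, with the payload taken from the CSA (or MES context) CPU address.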
Signed-off-by: Jiadong Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 80 +++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4073e2ee7e6d..65577eca58f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -5165,9 +5165,83 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, #endif lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_ib_on_emit_cntl(ring); amdgpu_ring_write(ring, control); } +static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring, + unsigned offset) +{ + u32 control = ring->ring[offset]; + + control |= INDIRECT_BUFFER_PRE_RESUME(1); + ring->ring[offset] = control; +} + +static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring, + unsigned offset) +{ + struct amdgpu_device *adev = ring->adev; + void *ce_payload_cpu_addr; + uint64_t payload_offset, payload_size; + + payload_size = sizeof(struct v9_ce_ib_state); + + if (ring->is_mes_queue) { + payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data, + gfx[0].gfx_meta_data) + + offsetof(struct v9_gfx_meta_data, ce_payload); + ce_payload_cpu_addr = + amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset); + } else { + payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload); + ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; + } + + if (offset + (payload_size >> 2) <= ring->buf_mask + 1) { + memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size); + } else { + memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, + (ring->buf_mask + 1 - offset) << 2); + payload_size -= (ring->buf_mask + 1 - offset) << 2; + memcpy((void *)&ring->ring[0], + ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2), + payload_size); + } +} + +static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring, + unsigned offset) +{ + struct amdgpu_device *adev = ring->adev; + void *de_payload_cpu_addr; + uint64_t payload_offset, payload_size; + + payload_size = sizeof(struct v9_de_ib_state); + + if (ring->is_mes_queue) { + payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data, + gfx[0].gfx_meta_data) + + offsetof(struct v9_gfx_meta_data, de_payload); + de_payload_cpu_addr = + amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset); + } else { + payload_offset = offsetof(struct v9_gfx_meta_data, de_payload); + de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; + } + + if (offset + (payload_size >> 2) <= ring->buf_mask + 1) { + memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size); + } else { + memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, + (ring->buf_mask + 1 - offset) << 2); + payload_size -= (ring->buf_mask + 1 - offset) << 2; + memcpy((void *)&ring->ring[0], + de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2), + payload_size); + } +} + static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, @@ -5363,6 +5437,8 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume) amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr)); + amdgpu_ring_ib_on_emit_ce(ring); + if (resume) amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr, sizeof(ce_payload) >> 2); @@ -5474,6 +5550,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool 
resume, bo amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); + amdgpu_ring_ib_on_emit_de(ring); if (resume) amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, sizeof(de_payload) >> 2); @@ -6884,6 +6961,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v9_0_ring_soft_recovery, .emit_mem_sync = gfx_v9_0_emit_mem_sync, + .patch_cntl = gfx_v9_0_ring_patch_cntl, + .patch_de = gfx_v9_0_ring_patch_de_meta, + .patch_ce = gfx_v9_0_ring_patch_ce_meta, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { From 71c79a196096bf51603322760dc6a95e2eb82ac1 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Tue, 30 May 2023 21:48:17 +0800 Subject: [PATCH 820/861] drm/amdgpu: Rename ras table version Rename RAS_TABLE_VER to RAS_TABLE_VER_V1, move RAS_TABLE_VER_V1 from amdgpu_ras_eeprom.c to amdgpu_ras_eeprom.h. Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 5 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 2 ++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index c2c2a7718613..e9a848db3ff2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -68,7 +68,6 @@ /* Table hdr is 'AMDR' */ #define RAS_TABLE_HDR_VAL 0x414d4452 -#define RAS_TABLE_VER 0x00010000 /* Bad GPU tag ‘BADG’ */ #define RAS_TABLE_HDR_BAD 0x42414447 @@ -289,7 +288,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) mutex_lock(&control->ras_tbl_mutex); hdr->header = RAS_TABLE_HDR_VAL; - hdr->version = RAS_TABLE_VER; + hdr->version = RAS_TABLE_VER_V1; hdr->first_rec_offset = RAS_RECORD_START; hdr->tbl_size = RAS_TABLE_HEADER_SIZE; @@ -575,7 +574,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) control->tbl_hdr.header = RAS_TABLE_HDR_BAD; } - control->tbl_hdr.version = RAS_TABLE_VER; + control->tbl_hdr.version = RAS_TABLE_VER_V1; control->tbl_hdr.first_rec_offset = RAS_INDEX_TO_OFFSET(control, control->ras_fri); control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; control->tbl_hdr.checksum = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 54d9bfe0881d..6d21cfdeffa5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -26,6 +26,8 @@ #include +#define RAS_TABLE_VER_V1 0x00010000 + struct amdgpu_device; enum amdgpu_ras_eeprom_err_type { From 65183faec89f3ef2c781f2ed6803e6ed5c365d48 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Tue, 30 May 2023 22:38:17 +0800 Subject: [PATCH 821/861] drm/amdgpu: Add RAS table v2.1 macro definition Add RAS EEPROM table version 2.1 macro definition. 
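To make the new layout concrete, a small worked example of the v2.1 offset arithmetic (the 256 KiB table and 20-byte header match the driver; the 24-byte record size is an assumption here, since RAS_TABLE_RECORD_SIZE is defined elsewhere):

#include <stdio.h>

#define TBL_SIZE_BYTES	(256 * 1024)	/* 2-Mbit EEPROM */
#define HDR_SIZE	20
#define V2_1_INFO_SIZE	256
#define REC_SIZE	24		/* assumed record size */

int main(void)
{
	unsigned int rec_start = HDR_SIZE + V2_1_INFO_SIZE;
	unsigned int max_recs =
		(TBL_SIZE_BYTES - HDR_SIZE - V2_1_INFO_SIZE) / REC_SIZE;

	/* With these numbers: records start at byte 276, max 10911. */
	printf("records start at byte %u, max %u records\n",
	       rec_start, max_recs);
	return 0;
}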
Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 1 + 2 files changed, 14 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index e9a848db3ff2..e835f68d1ebb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -80,6 +80,15 @@ #define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \ / RAS_TABLE_RECORD_SIZE) +/* EEPROM Table V2_1 */ +#define RAS_TABLE_V2_1_INFO_SIZE 256 +#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE +#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \ + RAS_TABLE_V2_1_INFO_SIZE) +#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \ + RAS_TABLE_V2_1_INFO_SIZE) \ + / RAS_TABLE_RECORD_SIZE) + /* Given a zero-based index of an EEPROM RAS record, yields the EEPROM * offset off of RAS_TABLE_START. That is, this is something you can * add to control->i2c_address, and then tell I2C layer to read @@ -102,6 +111,10 @@ #define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE) +#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ + RAS_TABLE_HEADER_SIZE - \ + RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE) + #define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev static bool __is_ras_eeprom_supported(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 6d21cfdeffa5..069249249c76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -27,6 +27,7 @@ #include #define RAS_TABLE_VER_V1 0x00010000 +#define RAS_TABLE_VER_V2_1 0x00021000 struct amdgpu_device; From b573cf88c0d0a1f71873ca36edf0e20d4b9a82a8 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Tue, 30 May 2023 22:48:34 +0800 Subject: [PATCH 822/861] drm/amdgpu: Support setting EEPROM table version Add an interface for setting the EEPROM table version, and use it to select EEPROM table v2.1 for UMC v8.10.
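A minimal stand-alone model of how such a per-ASIC hook is meant to be consumed when the header is rebuilt (the real call site is added later in this series; names are shortened here):

#include <stdint.h>
#include <stdio.h>

struct tbl_hdr { uint32_t version; };
struct umc_ras { void (*set_eeprom_table_version)(struct tbl_hdr *hdr); };

#define VER_V1	 0x00010000
#define VER_V2_1 0x00021000

static void v8_10_set_version(struct tbl_hdr *hdr) { hdr->version = VER_V2_1; }

/* ASICs providing the hook pick their own version; default is v1. */
static void init_header_version(struct tbl_hdr *hdr, const struct umc_ras *ras)
{
	if (ras && ras->set_eeprom_table_version)
		ras->set_eeprom_table_version(hdr);
	else
		hdr->version = VER_V1;
}

int main(void)
{
	struct umc_ras v8_10 = { .set_eeprom_table_version = v8_10_set_version };
	struct tbl_hdr hdr;

	init_header_version(&hdr, &v8_10);
	printf("header version: 0x%08x\n", hdr.version);	/* 0x00021000 */
	return 0;
}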
Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 2 ++ drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 86133f77a9a4..43321f57f557 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -59,6 +59,8 @@ struct amdgpu_umc_ras { void *ras_error_status); void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); + /* support different eeprom table version for different asic */ + void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr); }; struct amdgpu_umc_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c index d51ae0bc36f7..46bfdee79bfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c @@ -444,6 +444,11 @@ static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *ade umc_v8_10_ecc_info_query_error_address, ras_error_status); } +static void umc_v8_10_set_eeprom_table_version(struct amdgpu_ras_eeprom_table_header *hdr) +{ + hdr->version = RAS_TABLE_VER_V2_1; +} + const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = { .query_ras_error_count = umc_v8_10_query_ras_error_count, .query_ras_error_address = umc_v8_10_query_ras_error_address, @@ -457,4 +462,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = { .query_ras_poison_mode = umc_v8_10_query_ras_poison_mode, .ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count, .ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address, + .set_eeprom_table_version = umc_v8_10_set_eeprom_table_version, }; From 7f599fed3b13fe97dcd6f68bf8a5c62abb91d0a4 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Wed, 31 May 2023 10:37:09 +0800 Subject: [PATCH 823/861] drm/amdgpu: Add support EEPROM table v2.1 Add ras info to EEPROM table, app can analyse device ECC status without GPU driver through EEPROM table ras info. 
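Because the point is that tooling can assess a board without the driver, a minimal user-space sketch of decoding the four packed info bytes that sit right after the 20-byte header (same little-endian packing as the encode/decode helpers in the diff below; the tool itself is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* bits 0-7: rma status, bits 8-15: health percent,
 * bits 16-31: ecc page threshold.
 */
static void decode_ras_info(const uint8_t buf[4])
{
	uint32_t w = (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
		     ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);

	printf("rma_status=%u health=%u%% ecc_page_threshold=%u\n",
	       w & 0xFF, (w >> 8) & 0xFF, (w >> 16) & 0xFFFF);
}

int main(void)
{
	/* Example bytes: usable, 100% health, threshold of 10 pages. */
	const uint8_t sample[4] = { 0x00, 0x64, 0x0a, 0x00 };

	decode_ras_info(sample);
	return 0;
}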
Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 +- .../gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 204 ++++++++++++++++-- .../gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 12 +- 3 files changed, 203 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 5bd1bdb363ca..a6c3265cdbc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2314,7 +2314,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) atomic_set(&con->in_recovery, 0); con->eeprom_control.bad_channel_bitmap = 0; - max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(); + max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); /* Todo: During test the SMU might fail to read the eeprom through I2C diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index e835f68d1ebb..7f134c3b0b14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -72,6 +72,20 @@ /* Bad GPU tag ‘BADG’ */ #define RAS_TABLE_HDR_BAD 0x42414447 +/** + * EEPROM Table structure v1 + * --------------------------------- + * | | + * | EEPROM TABLE HEADER | + * | ( size 20 Bytes ) | + * | | + * --------------------------------- + * | | + * | BAD PAGE RECORD AREA | + * | | + * --------------------------------- + */ + /* Assume 2-Mbit size EEPROM and take up the whole space. */ #define RAS_TBL_SIZE_BYTES (256 * 1024) #define RAS_TABLE_START 0 @@ -80,6 +94,26 @@ #define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \ / RAS_TABLE_RECORD_SIZE) +/** + * EEPROM Table structrue v2.1 + * --------------------------------- + * | | + * | EEPROM TABLE HEADER | + * | ( size 20 Bytes ) | + * | | + * --------------------------------- + * | | + * | EEPROM TABLE RAS INFO | + * | (available info size 4 Bytes) | + * | ( reserved size 252 Bytes ) | + * | | + * --------------------------------- + * | | + * | BAD PAGE RECORD AREA | + * | | + * --------------------------------- + */ + /* EEPROM Table V2_1 */ #define RAS_TABLE_V2_1_INFO_SIZE 256 #define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE @@ -242,6 +276,69 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control) return res; } +static void +__encode_table_ras_info_to_buf(struct amdgpu_ras_eeprom_table_ras_info *rai, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + u32 tmp; + + tmp = ((uint32_t)(rai->rma_status) & 0xFF) | + (((uint32_t)(rai->health_percent) << 8) & 0xFF00) | + (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000); + pp[0] = cpu_to_le32(tmp); +} + +static void +__decode_table_ras_info_from_buf(struct amdgpu_ras_eeprom_table_ras_info *rai, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + u32 tmp; + + tmp = le32_to_cpu(pp[0]); + rai->rma_status = tmp & 0xFF; + rai->health_percent = (tmp >> 8) & 0xFF; + rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF; +} + +static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + u8 *buf; + int res; + + buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); + if (!buf) { + DRM_ERROR("Failed to alloc buf to write table ras info\n"); + return -ENOMEM; + } + + __encode_table_ras_info_to_buf(&control->tbl_rai, buf); + + /* i2c may 
be unstable in gpu reset */ + down_read(&adev->reset_domain->sem); + res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus, + control->i2c_address + + control->ras_info_offset, + buf, RAS_TABLE_V2_1_INFO_SIZE); + up_read(&adev->reset_domain->sem); + + if (res < 0) { + DRM_ERROR("Failed to write EEPROM table ras info:%d", res); + } else if (res < RAS_TABLE_V2_1_INFO_SIZE) { + DRM_ERROR("Short write:%d out of %d\n", + res, RAS_TABLE_V2_1_INFO_SIZE); + res = -EIO; + } else { + res = 0; + } + + kfree(buf); + + return res; +} + static u8 __calc_hdr_byte_sum(const struct amdgpu_ras_eeprom_control *control) { int ii; @@ -301,14 +398,27 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) mutex_lock(&control->ras_tbl_mutex); hdr->header = RAS_TABLE_HDR_VAL; - hdr->version = RAS_TABLE_VER_V1; - hdr->first_rec_offset = RAS_RECORD_START; - hdr->tbl_size = RAS_TABLE_HEADER_SIZE; + if (adev->umc.ras && + adev->umc.ras->set_eeprom_table_version) + adev->umc.ras->set_eeprom_table_version(hdr); + else + hdr->version = RAS_TABLE_VER_V1; + + if (hdr->version == RAS_TABLE_VER_V2_1) { + hdr->first_rec_offset = RAS_RECORD_START_V2_1; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE; + } else { + hdr->first_rec_offset = RAS_RECORD_START; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE; + } csum = __calc_hdr_byte_sum(control); csum = -csum; hdr->checksum = csum; res = __write_table_header(control); + if (!res && hdr->version > RAS_TABLE_VER_V1) + res = __write_table_ras_info(control); control->ras_num_recs = 0; control->ras_fri = 0; @@ -587,9 +697,13 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) control->tbl_hdr.header = RAS_TABLE_HDR_BAD; } - control->tbl_hdr.version = RAS_TABLE_VER_V1; - control->tbl_hdr.first_rec_offset = RAS_INDEX_TO_OFFSET(control, control->ras_fri); - control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + else + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; control->tbl_hdr.checksum = 0; buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; @@ -629,6 +743,8 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) csum = -csum; control->tbl_hdr.checksum = csum; res = __write_table_header(control); + if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1) + res = __write_table_ras_info(control); Out: kfree(buf); return res; @@ -819,9 +935,12 @@ Out: return res; } -uint32_t amdgpu_ras_eeprom_max_record_count(void) +uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *control) { - return RAS_MAX_RECORD_COUNT; + if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) + return RAS_MAX_RECORD_COUNT_V2_1; + else + return RAS_MAX_RECORD_COUNT; } static ssize_t @@ -1063,8 +1182,14 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control int buf_size, res; u8 csum, *buf, *pp; - buf_size = RAS_TABLE_HEADER_SIZE + - control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) + buf_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + else + buf_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) { 
DRM_ERROR("Out of memory checking RAS table checksum.\n"); @@ -1092,6 +1217,39 @@ Out: return res < 0 ? res : csum; } +static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai; + struct amdgpu_device *adev = to_amdgpu_device(control); + unsigned char *buf; + int res; + + buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); + if (!buf) { + DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n"); + return -ENOMEM; + } + + /** + * EEPROM table V2_1 supports ras info, + * read EEPROM table ras info + */ + res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus, + control->i2c_address + control->ras_info_offset, + buf, RAS_TABLE_V2_1_INFO_SIZE); + if (res < RAS_TABLE_V2_1_INFO_SIZE) { + DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res); + res = res >= 0 ? -EIO : res; + goto Out; + } + + __decode_table_ras_info_from_buf(rai, buf); + +Out: + kfree(buf); + return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res; +} + int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, bool *exceed_err_limit) { @@ -1114,8 +1272,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, return -EINVAL; control->ras_header_offset = RAS_HDR_START; - control->ras_record_offset = RAS_RECORD_START; - control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + control->ras_info_offset = RAS_TABLE_V2_1_INFO_START; mutex_init(&control->ras_tbl_mutex); /* Read the table header from EEPROM address */ @@ -1129,12 +1286,27 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, __decode_table_header_from_buf(hdr, buf); - control->ras_num_recs = RAS_NUM_RECS(hdr); + if (hdr->version == RAS_TABLE_VER_V2_1) { + control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr); + control->ras_record_offset = RAS_RECORD_START_V2_1; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1; + } else { + control->ras_num_recs = RAS_NUM_RECS(hdr); + control->ras_record_offset = RAS_RECORD_START; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + } control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset); if (hdr->header == RAS_TABLE_HDR_VAL) { DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", control->ras_num_recs); + + if (hdr->version == RAS_TABLE_VER_V2_1) { + res = __read_table_ras_info(control); + if (res) + return res; + } + res = __verify_ras_table_checksum(control); if (res) DRM_ERROR("RAS table incorrect checksum or error:%d\n", @@ -1148,6 +1320,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, ras->bad_page_cnt_threshold); } else if (hdr->header == RAS_TABLE_HDR_BAD && amdgpu_bad_page_threshold != 0) { + if (hdr->version == RAS_TABLE_VER_V2_1) { + res = __read_table_ras_info(control); + if (res) + return res; + } + res = __verify_ras_table_checksum(control); if (res) DRM_ERROR("RAS Table incorrect checksum or error:%d\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 069249249c76..3c5575c19bf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -46,9 +46,18 @@ struct amdgpu_ras_eeprom_table_header { uint32_t checksum; } __packed; +struct amdgpu_ras_eeprom_table_ras_info { + u8 rma_status; + u8 health_percent; + u16 ecc_page_threshold; + u32 padding[64 - 1]; +} __packed; + struct amdgpu_ras_eeprom_control { struct amdgpu_ras_eeprom_table_header tbl_hdr; + struct amdgpu_ras_eeprom_table_ras_info tbl_rai; + /* 
Base I2C EEPPROM 19-bit memory address, * where the table is located. For more information, * see top of amdgpu_eeprom.c. @@ -61,6 +70,7 @@ struct amdgpu_ras_eeprom_control { * right after the header. */ u32 ras_header_offset; + u32 ras_info_offset; u32 ras_record_offset; /* Number of records in the table. @@ -127,7 +137,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, const u32 num); -uint32_t amdgpu_ras_eeprom_max_record_count(void); +uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *control); void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); From 7c2551fa1dfdb06a9dd3a6c629086fe2c348e00a Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Mon, 29 May 2023 10:17:59 +0800 Subject: [PATCH 824/861] drm/amdgpu: Calculate EEPROM table ras info bytes sum It's more reasonable to check EEPROM table ras info bytes. Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 7f134c3b0b14..9eceb3bc1058 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -355,6 +355,21 @@ static u8 __calc_hdr_byte_sum(const struct amdgpu_ras_eeprom_control *control) return csum; } +static u8 __calc_ras_info_byte_sum(const struct amdgpu_ras_eeprom_control *control) +{ + int ii; + u8 *pp, csum; + size_t sz; + + sz = sizeof(control->tbl_rai); + pp = (u8 *) &control->tbl_rai; + csum = 0; + for (ii = 0; ii < sz; ii++, pp++) + csum += *pp; + + return csum; +} + static int amdgpu_ras_eeprom_correct_header_tag( struct amdgpu_ras_eeprom_control *control, uint32_t header) @@ -414,6 +429,8 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) } csum = __calc_hdr_byte_sum(control); + if (hdr->version == RAS_TABLE_VER_V2_1) + csum += __calc_ras_info_byte_sum(control); csum = -csum; hdr->checksum = csum; res = __write_table_header(control); @@ -739,6 +756,8 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) csum += *pp; csum += __calc_hdr_byte_sum(control); + if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) + csum += __calc_ras_info_byte_sum(control); /* avoid sign extension when assigning to "checksum" */ csum = -csum; control->tbl_hdr.checksum = csum; From 0bc3137b2157115f328859477b463c912d605c3a Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Thu, 1 Jun 2023 20:56:42 +0800 Subject: [PATCH 825/861] drm/amdgpu: Set EEPROM ras info Set EEPROM ras info: rma status, health percent and bad page threshold. 
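The health figure is just the remaining headroom under the bad-page threshold, expressed as a percentage; a stand-alone restatement of the calculation used in the diff below:

/* 100 = no bad pages recorded; 0 = at or over the retirement
 * threshold (the header is then also tagged bad and rma_status set
 * to GPU_RETIRED__ECC_REACH_THRESHOLD).
 */
static unsigned int gpu_health_percent(unsigned int num_recs,
				       unsigned int threshold)
{
	if (!threshold || num_recs >= threshold)
		return 0;
	return (threshold - num_recs) * 100 / threshold;
}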
Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 24 +++++++++++++++++++ .../gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 5 ++++ 2 files changed, 29 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 9eceb3bc1058..c2e8f6491ac6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -406,6 +406,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) { struct amdgpu_device *adev = to_amdgpu_device(control); struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); u8 csum; int res; @@ -423,6 +424,14 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) hdr->first_rec_offset = RAS_RECORD_START_V2_1; hdr->tbl_size = RAS_TABLE_HEADER_SIZE + RAS_TABLE_V2_1_INFO_SIZE; + rai->rma_status = GPU_HEALTH_USABLE; + /** + * GPU health represented as a percentage. + * 0 means worst health, 100 means fully health. + */ + rai->health_percent = 100; + /* ecc_page_threshold = 0 means disable bad page retirement */ + rai->ecc_page_threshold = con->bad_page_cnt_threshold; } else { hdr->first_rec_offset = RAS_RECORD_START; hdr->tbl_size = RAS_TABLE_HEADER_SIZE; @@ -712,6 +721,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) "Saved bad pages %d reaches threshold value %d\n", control->ras_num_recs, ras->bad_page_cnt_threshold); control->tbl_hdr.header = RAS_TABLE_HDR_BAD; + if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) { + control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD; + control->tbl_rai.health_percent = 0; + } } if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) @@ -749,6 +762,17 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) goto Out; } + /** + * bad page records have been stored in eeprom, + * now calculate gpu health percent + */ + if (amdgpu_bad_page_threshold != 0 && + control->tbl_hdr.version == RAS_TABLE_VER_V2_1 && + control->ras_num_recs < ras->bad_page_cnt_threshold) + control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold - + control->ras_num_recs) * 100) / + ras->bad_page_cnt_threshold; + /* Recalc the checksum. */ csum = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 3c5575c19bf8..6dfd667f3013 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -31,6 +31,11 @@ struct amdgpu_device; +enum amdgpu_ras_gpu_health_status { + GPU_HEALTH_USABLE = 0, + GPU_RETIRED__ECC_REACH_THRESHOLD = 2, +}; + enum amdgpu_ras_eeprom_err_type { AMDGPU_RAS_EEPROM_ERR_NA, AMDGPU_RAS_EEPROM_ERR_RECOVERABLE, From 7386f88ab1732af890a09ab3a7f400bb20adbe5a Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Wed, 7 Jun 2023 03:56:27 -0400 Subject: [PATCH 826/861] drm/amdkfd: fix vmfault signalling with additional data. Exception handling for vmfaults should be raised with additional data. 
Reported-by: Mukul Joshi Signed-off-by: Jonathan Kim Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 34 +++++++++++++++---------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 0a5e7b172a64..ddca23ee4193 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -1240,19 +1240,24 @@ void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, return; } - memset(&memory_exception_data, 0, sizeof(memory_exception_data)); - memory_exception_data.gpu_id = user_gpu_id; - memory_exception_data.failure.imprecise = true; - /* Set failure reason */ - if (info) { - memory_exception_data.va = (info->page_addr) << PAGE_SHIFT; - memory_exception_data.failure.NotPresent = - info->prot_valid ? 1 : 0; - memory_exception_data.failure.NoExecute = - info->prot_exec ? 1 : 0; - memory_exception_data.failure.ReadOnly = - info->prot_write ? 1 : 0; - memory_exception_data.failure.imprecise = 0; + /* SoC15 chips and onwards will pass in data from now on. */ + if (!data) { + memset(&memory_exception_data, 0, sizeof(memory_exception_data)); + memory_exception_data.gpu_id = user_gpu_id; + memory_exception_data.failure.imprecise = true; + + /* Set failure reason */ + if (info) { + memory_exception_data.va = (info->page_addr) << + PAGE_SHIFT; + memory_exception_data.failure.NotPresent = + info->prot_valid ? 1 : 0; + memory_exception_data.failure.NoExecute = + info->prot_exec ? 1 : 0; + memory_exception_data.failure.ReadOnly = + info->prot_write ? 1 : 0; + memory_exception_data.failure.imprecise = 0; + } } rcu_read_lock(); @@ -1261,7 +1266,8 @@ void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, idr_for_each_entry_continue(&p->event_idr, ev, id) if (ev->type == KFD_EVENT_TYPE_MEMORY) { spin_lock(&ev->lock); - ev->memory_exception_data = memory_exception_data; + ev->memory_exception_data = data ? *data : + memory_exception_data; set_event(ev); spin_unlock(&ev->lock); } From bbcc3514ab4f7ec3ae2273ad08b0a1b6b4aa9dd9 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 2 Jun 2023 07:18:06 -0500 Subject: [PATCH 827/861] drm/amd: Check that a system is a NUMA system before looking for SRAT It's pointless on laptops to look for the SRAT table as these are not NUMA. Check the number of possible nodes is > 1 to decide whether to look for SRAT. 
Suggested-by: Felix Kuehling Signed-off-by: Mario Limonciello Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 950af6820153..3dcd8f8bc98e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -2041,7 +2041,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, sub_type_hdr->proximity_domain_from = proximity_domain; #ifdef CONFIG_ACPI_NUMA - if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE) + if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE && + num_possible_nodes() > 1) kfd_find_numa_node_in_srat(kdev); #endif #ifdef CONFIG_NUMA From c1ac2ea802f5adfd1d128fc01375af9c5f113932 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 6 Jun 2023 11:14:04 -0400 Subject: [PATCH 828/861] drm/amdgpu: add missing radeon secondary PCI ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 0x5b70 is a missing RV370 secondary id. Add it so we don't try and probe it with amdgpu. Cc: michel@daenzer.net Reviewed-by: Michel Dänzer Tested-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 7489b2b1a0d0..b8a1e4571cd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1640,6 +1640,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = { 0x5874, 0x5940, 0x5941, + 0x5b70, 0x5b72, 0x5b73, 0x5b74, From 597364adc0fcf71617b3adbe647b6eec76e27554 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 31 May 2023 11:22:03 -0400 Subject: [PATCH 829/861] drm/amdkfd: Fix reserved SDMA queues handling This patch fixes a regression caused by a bad merge where the handling of reserved SDMA queues was accidentally removed. With the fix, the reserved SDMA queues are again correctly marked as unavailable for allocation. Fixes: a805889a1531 ("drm/amdkfd: Update SDMA queue management for GFX9.4.3") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 13 ++++++------- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10 +++++----- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 9fc9d32cb579..9d4abfd8b55e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -106,20 +106,19 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) kfd->device_info.num_sdma_queues_per_engine = 8; } + bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES); + switch (sdma_version) { case IP_VERSION(6, 0, 0): + case IP_VERSION(6, 0, 1): case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): /* Reserve 1 for paging and 1 for gfx */ kfd->device_info.num_reserved_sdma_queues_per_engine = 2; /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */ - kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL; - break; - case IP_VERSION(6, 0, 1): - /* Reserve 1 for paging and 1 for gfx */ - kfd->device_info.num_reserved_sdma_queues_per_engine = 2; - /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... 
*/ - kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL; + bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0, + kfd->adev->sdma.num_instances * + kfd->device_info.num_reserved_sdma_queues_per_engine); break; default: break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0c1be91a87c6..498ad7d4e7d9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -123,11 +123,6 @@ unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } -static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm) -{ - return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; -} - static void init_sdma_bitmaps(struct device_queue_manager *dqm) { bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES); @@ -135,6 +130,11 @@ static void init_sdma_bitmaps(struct device_queue_manager *dqm) bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm)); + + /* Mask out the reserved queues */ + bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap, + dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap, + KFD_MAX_SDMA_QUEUES); } void program_sh_mem_settings(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 023b17e0116b..7364a5d77c6e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -239,7 +239,7 @@ struct kfd_device_info { uint32_t no_atomic_fw_version; unsigned int num_sdma_queues_per_engine; unsigned int num_reserved_sdma_queues_per_engine; - uint64_t reserved_sdma_queues_bitmap; + DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES); }; unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev); From 1626761ee4406c51d5afe9d47dd41a29e2049b71 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 3 Feb 2023 02:07:42 +0000 Subject: [PATCH 830/861] drm/connector: Convert DRM_MODE_COLORIMETRY to enum MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows us to use strongly typed arguments. v2: - Bring NO_DATA back - Provide explicit enum values v3: - Drop unnecessary '&' from kerneldoc (emersion) v4: - Fix Normal Colorimetry comment Signed-off-by: Harry Wentland Reviewed-by: Simon Ser Reviewed-by: Sebastian Wick Reviewed-by: Pekka Paalanen Reviewed-by: Joshua Ashton Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Uma Shankar Cc: Ville Syrjälä Cc: Joshua Ashton Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- include/drm/display/drm_dp.h | 2 +- include/drm/drm_connector.h | 49 ++++++++++++++++++------------------ 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index d735073fdd81..ccbf0c0934c3 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -1636,7 +1636,7 @@ enum dp_pixelformat { * * This enum is used to indicate DP VSC SDP Colorimetry formats. * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through - * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition. + * DB18] and a name of enum member follows enum drm_colorimetry definition. 
* * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or * ITU-R BT.601 colorimetry format diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 7b5048516185..31ecd36bc7ea 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -427,29 +427,30 @@ enum drm_privacy_screen_status { * a colorspace property which will be created and exposed to * userspace. */ - -/* For Default case, driver will set the colorspace */ -#define DRM_MODE_COLORIMETRY_DEFAULT 0 -/* CEA 861 Normal Colorimetry options */ -#define DRM_MODE_COLORIMETRY_NO_DATA 0 -#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1 -#define DRM_MODE_COLORIMETRY_BT709_YCC 2 -/* CEA 861 Extended Colorimetry Options */ -#define DRM_MODE_COLORIMETRY_XVYCC_601 3 -#define DRM_MODE_COLORIMETRY_XVYCC_709 4 -#define DRM_MODE_COLORIMETRY_SYCC_601 5 -#define DRM_MODE_COLORIMETRY_OPYCC_601 6 -#define DRM_MODE_COLORIMETRY_OPRGB 7 -#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8 -#define DRM_MODE_COLORIMETRY_BT2020_RGB 9 -#define DRM_MODE_COLORIMETRY_BT2020_YCC 10 -/* Additional Colorimetry extension added as part of CTA 861.G */ -#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11 -#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12 -/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */ -#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13 -#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14 -#define DRM_MODE_COLORIMETRY_BT601_YCC 15 +enum drm_colorspace { + /* For Default case, driver will set the colorspace */ + DRM_MODE_COLORIMETRY_DEFAULT = 0, + /* CEA 861 Normal Colorimetry options */ + DRM_MODE_COLORIMETRY_NO_DATA = 0, + DRM_MODE_COLORIMETRY_SMPTE_170M_YCC = 1, + DRM_MODE_COLORIMETRY_BT709_YCC = 2, + /* CEA 861 Extended Colorimetry Options */ + DRM_MODE_COLORIMETRY_XVYCC_601 = 3, + DRM_MODE_COLORIMETRY_XVYCC_709 = 4, + DRM_MODE_COLORIMETRY_SYCC_601 = 5, + DRM_MODE_COLORIMETRY_OPYCC_601 = 6, + DRM_MODE_COLORIMETRY_OPRGB = 7, + DRM_MODE_COLORIMETRY_BT2020_CYCC = 8, + DRM_MODE_COLORIMETRY_BT2020_RGB = 9, + DRM_MODE_COLORIMETRY_BT2020_YCC = 10, + /* Additional Colorimetry extension added as part of CTA 861.G */ + DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 = 11, + DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER = 12, + /* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */ + DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13, + DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14, + DRM_MODE_COLORIMETRY_BT601_YCC = 15, +}; /** * enum drm_bus_flags - bus_flags info for &drm_display_info @@ -901,7 +902,7 @@ struct drm_connector_state { * colorspace change on Sink. This is most commonly used to switch * to wider color gamuts like BT2020. */ - u32 colorspace; + enum drm_colorspace colorspace; /** * @writeback_job: Writeback job for writeback connectors From f96c61fe0383d73732aba72fabb7e2c7ce0b0835 Mon Sep 17 00:00:00 2001 From: Joshua Ashton Date: Fri, 3 Feb 2023 02:07:43 +0000 Subject: [PATCH 831/861] drm/connector: Add enum documentation to drm_colorspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To match the other enums, and add more information about these values. 
v2: - Specify where an enum entry comes from - Clarify DEFAULT and NO_DATA behavior - BT.2020 CYCC is "constant luminance" - correct type for BT.601 v4: - drop DP/HDMI clarifications that might create more questions than answers v5: - Add note on YCC and RGB variants Signed-off-by: Joshua Ashton Signed-off-by: Harry Wentland Reviewed-by: Harry Wentland Reviewed-by: Sebastian Wick Acked-by: Pekka Paalanen Reviewed-by: Simon Ser Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Uma Shankar Cc: Ville Syrjälä Cc: Joshua Ashton Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- include/drm/drm_connector.h | 70 +++++++++++++++++++++++++++++++++++-- 1 file changed, 68 insertions(+), 2 deletions(-) diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 31ecd36bc7ea..e338432580e0 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -419,13 +419,79 @@ enum drm_privacy_screen_status { PRIVACY_SCREEN_ENABLED_LOCKED, }; -/* - * This is a consolidated colorimetry list supported by HDMI and +/** + * enum drm_colorspace - color space + * + * This enum is a consolidated colorimetry list supported by HDMI and * DP protocol standard. The respective connectors will register * a property with the subset of this list (supported by that * respective protocol). Userspace will set the colorspace through * a colorspace property which will be created and exposed to * userspace. + * + * DP definitions come from the DP v2.0 spec + * HDMI definitions come from the CTA-861-H spec + * + * A note on YCC and RGB variants: + * + * Since userspace is not aware of the encoding on the wire + * (RGB or YCbCr), drivers are free to pick the appropriate + * variant, regardless of what userspace selects. E.g., if + * BT2020_RGB is selected by userspace a driver will pick + * BT2020_YCC if the encoding on the wire is YUV444 or YUV420. + * + * @DRM_MODE_COLORIMETRY_DEFAULT: + * Driver specific behavior. + * @DRM_MODE_COLORIMETRY_NO_DATA: + * Driver specific behavior. 
+ * @DRM_MODE_COLORIMETRY_SMPTE_170M_YCC: + * (HDMI) + * SMPTE ST 170M colorimetry format + * @DRM_MODE_COLORIMETRY_BT709_YCC: + * (HDMI, DP) + * ITU-R BT.709 colorimetry format + * @DRM_MODE_COLORIMETRY_XVYCC_601: + * (HDMI, DP) + * xvYCC601 colorimetry format + * @DRM_MODE_COLORIMETRY_XVYCC_709: + * (HDMI, DP) + * xvYCC709 colorimetry format + * @DRM_MODE_COLORIMETRY_SYCC_601: + * (HDMI, DP) + * sYCC601 colorimetry format + * @DRM_MODE_COLORIMETRY_OPYCC_601: + * (HDMI, DP) + * opYCC601 colorimetry format + * @DRM_MODE_COLORIMETRY_OPRGB: + * (HDMI, DP) + * opRGB colorimetry format + * @DRM_MODE_COLORIMETRY_BT2020_CYCC: + * (HDMI, DP) + * ITU-R BT.2020 Y'c C'bc C'rc (constant luminance) colorimetry format + * @DRM_MODE_COLORIMETRY_BT2020_RGB: + * (HDMI, DP) + * ITU-R BT.2020 R' G' B' colorimetry format + * @DRM_MODE_COLORIMETRY_BT2020_YCC: + * (HDMI, DP) + * ITU-R BT.2020 Y' C'b C'r colorimetry format + * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: + * (HDMI) + * SMPTE ST 2113 P3D65 colorimetry format + * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: + * (HDMI) + * SMPTE ST 2113 P3DCI colorimetry format + * @DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED: + * (DP) + * RGB wide gamut fixed point colorimetry format + * @DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT: + * (DP) + * RGB wide gamut floating point + * (scRGB (IEC 61966-2-2)) colorimetry format + * @DRM_MODE_COLORIMETRY_BT601_YCC: + * (DP) + * ITU-R BT.601 colorimetry format + * The DP spec does not say whether this is the 525 or the 625 + * line version. */ enum drm_colorspace { /* For Default case, driver will set the colorspace */ From 6120611abc05dd850eff4eb3026f977ac7e34718 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Wed, 30 Nov 2022 14:42:03 -0500 Subject: [PATCH 832/861] drm/connector: Pull out common create_colorspace_property code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Harry Wentland Reviewed-by: Sebastian Wick Reviewed-by: Joshua Ashton Reviewed-by: Simon Ser Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Uma Shankar Cc: Ville Syrjälä Cc: Joshua Ashton Cc: Jani Nikula Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_connector.c | 56 ++++++++++++++++----------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 48df7a5ea503..614dd9fd4d3f 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2135,6 +2135,28 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); * drm_mode_create_dp_colorspace_property() is used for DP connector. */ +static int drm_mode_create_colorspace_property(struct drm_connector *connector, + const struct drm_prop_enum_list *colorspaces, + int size) +{ + struct drm_device *dev = connector->dev; + + if (connector->colorspace_property) + return 0; + + if (!colorspaces) + return 0; + + connector->colorspace_property = + drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace", + colorspaces, + size); + + if (!connector->colorspace_property) + return -ENOMEM; + + return 0; +} /** * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property * @connector: connector to create the Colorspace property on. 
@@ -2147,20 +2169,9 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); */ int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - - if (connector->colorspace_property) - return 0; - - connector->colorspace_property = - drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace", - hdmi_colorspaces, - ARRAY_SIZE(hdmi_colorspaces)); - - if (!connector->colorspace_property) - return -ENOMEM; - - return 0; + return drm_mode_create_colorspace_property(connector, + hdmi_colorspaces, + ARRAY_SIZE(hdmi_colorspaces)); } EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property); @@ -2176,20 +2187,9 @@ EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property); */ int drm_mode_create_dp_colorspace_property(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - - if (connector->colorspace_property) - return 0; - - connector->colorspace_property = - drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace", - dp_colorspaces, - ARRAY_SIZE(dp_colorspaces)); - - if (!connector->colorspace_property) - return -ENOMEM; - - return 0; + return drm_mode_create_colorspace_property(connector, + dp_colorspaces, + ARRAY_SIZE(dp_colorspaces)); } EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property); From c627087cb164d1675323c7942fa29bded4263dfc Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 29 Nov 2022 15:16:31 -0500 Subject: [PATCH 833/861] drm/connector: Use common colorspace_names array MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can use bitfields to track the supported ones for HDMI and DP. This allows us to print colorspaces in a consistent manner without needing to know whether we're dealing with DP or HDMI. v4: - Rename _MAX to _COUNT and leave comment to indicate it's not a valid value - Fix misplaced function doc v6: - Drop magic in drm_mode_create_colorspace_property for dealing with "0" supported_colorspaces. Expect the caller to always provide a non-zero supported_colorspaces.
- Improve error checking and logging Signed-off-by: Harry Wentland Reviewed-by: Sebastian Wick Reviewed-by: Joshua Ashton Reviewed-by: Simon Ser Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Uma Shankar Cc: Ville Syrjälä Cc: Joshua Ashton Cc: Jani Nikula Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_connector.c | 130 +++++++++++++++++++------------- include/drm/drm_connector.h | 2 + 2 files changed, 79 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 614dd9fd4d3f..b823b10ed697 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1055,64 +1055,70 @@ static const struct drm_prop_enum_list drm_dp_subconnector_enum_list[] = { DRM_ENUM_NAME_FN(drm_get_dp_subconnector_name, drm_dp_subconnector_enum_list) -static const struct drm_prop_enum_list hdmi_colorspaces[] = { + +static const char * const colorspace_names[] = { /* For Default case, driver will set the colorspace */ - { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, + [DRM_MODE_COLORIMETRY_DEFAULT] = "Default", /* Standard Definition Colorimetry based on CEA 861 */ - { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" }, - { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" }, + [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = "SMPTE_170M_YCC", + [DRM_MODE_COLORIMETRY_BT709_YCC] = "BT709_YCC", /* Standard Definition Colorimetry based on IEC 61966-2-4 */ - { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" }, + [DRM_MODE_COLORIMETRY_XVYCC_601] = "XVYCC_601", /* High Definition Colorimetry based on IEC 61966-2-4 */ - { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" }, + [DRM_MODE_COLORIMETRY_XVYCC_709] = "XVYCC_709", /* Colorimetry based on IEC 61966-2-1/Amendment 1 */ - { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" }, + [DRM_MODE_COLORIMETRY_SYCC_601] = "SYCC_601", /* Colorimetry based on IEC 61966-2-5 [33] */ - { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" }, + [DRM_MODE_COLORIMETRY_OPYCC_601] = "opYCC_601", /* Colorimetry based on IEC 61966-2-5 */ - { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" }, + [DRM_MODE_COLORIMETRY_OPRGB] = "opRGB", /* Colorimetry based on ITU-R BT.2020 */ - { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" }, + [DRM_MODE_COLORIMETRY_BT2020_CYCC] = "BT2020_CYCC", /* Colorimetry based on ITU-R BT.2020 */ - { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" }, + [DRM_MODE_COLORIMETRY_BT2020_RGB] = "BT2020_RGB", /* Colorimetry based on ITU-R BT.2020 */ - { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" }, + [DRM_MODE_COLORIMETRY_BT2020_YCC] = "BT2020_YCC", /* Added as part of Additional Colorimetry Extension in 861.G */ - { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" }, - { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" }, + [DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65] = "DCI-P3_RGB_D65", + [DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER] = "DCI-P3_RGB_Theater", + [DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED] = "RGB_WIDE_FIXED", + /* Colorimetry based on scRGB (IEC 61966-2-2) */ + [DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT] = "RGB_WIDE_FLOAT", + [DRM_MODE_COLORIMETRY_BT601_YCC] = "BT601_YCC", }; +static const u32 hdmi_colorspaces = + BIT(DRM_MODE_COLORIMETRY_SMPTE_170M_YCC) | + BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | + BIT(DRM_MODE_COLORIMETRY_XVYCC_601) | + BIT(DRM_MODE_COLORIMETRY_XVYCC_709) | + BIT(DRM_MODE_COLORIMETRY_SYCC_601) | + BIT(DRM_MODE_COLORIMETRY_OPYCC_601) | + BIT(DRM_MODE_COLORIMETRY_OPRGB) | + 
BIT(DRM_MODE_COLORIMETRY_BT2020_CYCC) | + BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | + BIT(DRM_MODE_COLORIMETRY_BT2020_YCC) | + BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65) | + BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER); + /* * As per DP 1.4a spec, 2.2.5.7.5 VSC SDP Payload for Pixel Encoding/Colorimetry * Format Table 2-120 */ -static const struct drm_prop_enum_list dp_colorspaces[] = { - /* For Default case, driver will set the colorspace */ - { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, - { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED, "RGB_Wide_Gamut_Fixed_Point" }, - /* Colorimetry based on scRGB (IEC 61966-2-2) */ - { DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT, "RGB_Wide_Gamut_Floating_Point" }, - /* Colorimetry based on IEC 61966-2-5 */ - { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" }, - /* Colorimetry based on SMPTE RP 431-2 */ - { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" }, - /* Colorimetry based on ITU-R BT.2020 */ - { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" }, - { DRM_MODE_COLORIMETRY_BT601_YCC, "BT601_YCC" }, - { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" }, - /* Standard Definition Colorimetry based on IEC 61966-2-4 */ - { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" }, - /* High Definition Colorimetry based on IEC 61966-2-4 */ - { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" }, - /* Colorimetry based on IEC 61966-2-1/Amendment 1 */ - { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" }, - /* Colorimetry based on IEC 61966-2-5 [33] */ - { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" }, - /* Colorimetry based on ITU-R BT.2020 */ - { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" }, - /* Colorimetry based on ITU-R BT.2020 */ - { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" }, -}; +static const u32 dp_colorspaces = + BIT(DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED) | + BIT(DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT) | + BIT(DRM_MODE_COLORIMETRY_OPRGB) | + BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65) | + BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | + BIT(DRM_MODE_COLORIMETRY_BT601_YCC) | + BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | + BIT(DRM_MODE_COLORIMETRY_XVYCC_601) | + BIT(DRM_MODE_COLORIMETRY_XVYCC_709) | + BIT(DRM_MODE_COLORIMETRY_SYCC_601) | + BIT(DRM_MODE_COLORIMETRY_OPYCC_601) | + BIT(DRM_MODE_COLORIMETRY_BT2020_CYCC) | + BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); /** * DOC: standard connector properties @@ -2136,27 +2142,49 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); */ static int drm_mode_create_colorspace_property(struct drm_connector *connector, - const struct drm_prop_enum_list *colorspaces, - int size) + u32 supported_colorspaces) { struct drm_device *dev = connector->dev; + u32 colorspaces = supported_colorspaces | BIT(DRM_MODE_COLORIMETRY_DEFAULT); + struct drm_prop_enum_list enum_list[DRM_MODE_COLORIMETRY_COUNT]; + int i, len; if (connector->colorspace_property) return 0; - if (!colorspaces) - return 0; + if (!supported_colorspaces) { + drm_err(dev, "No supported colorspaces provded on [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + return -EINVAL; + } + + if ((supported_colorspaces & -BIT(DRM_MODE_COLORIMETRY_COUNT)) != 0) { + drm_err(dev, "Unknown colorspace provded on [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + return -EINVAL; + } + + len = 0; + for (i = 0; i < DRM_MODE_COLORIMETRY_COUNT; i++) { + if ((colorspaces & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = colorspace_names[i]; + len++; + } connector->colorspace_property = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace", - colorspaces, - size); + enum_list, + 
len); if (!connector->colorspace_property) return -ENOMEM; return 0; } + /** * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property * @connector: connector to create the Colorspace property on. @@ -2169,9 +2197,7 @@ static int drm_mode_create_colorspace_property(struct drm_connector *connector, */ int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector) { - return drm_mode_create_colorspace_property(connector, - hdmi_colorspaces, - ARRAY_SIZE(hdmi_colorspaces)); + return drm_mode_create_colorspace_property(connector, hdmi_colorspaces); } EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property); @@ -2187,9 +2213,7 @@ EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property); */ int drm_mode_create_dp_colorspace_property(struct drm_connector *connector) { - return drm_mode_create_colorspace_property(connector, - dp_colorspaces, - ARRAY_SIZE(dp_colorspaces)); + return drm_mode_create_colorspace_property(connector, dp_colorspaces); } EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index e338432580e0..6eb8e2d5d20f 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -516,6 +516,8 @@ enum drm_colorspace { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13, DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14, DRM_MODE_COLORIMETRY_BT601_YCC = 15, + /* not a valid value; merely used for counting */ + DRM_MODE_COLORIMETRY_COUNT }; /** From 035d53e0f36da6ce49abf7bea3d9b30a075ff247 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Wed, 30 Nov 2022 16:11:30 -0500 Subject: [PATCH 834/861] drm/connector: Print connector colorspace in state debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v3: Fix kerneldocs (kernel test robot) v4: Avoid returning NULL from drm_get_colorspace_name Signed-off-by: Harry Wentland Reviewed-by: Sebastian Wick Reviewed-by: Joshua Ashton Reviewed-by: Simon Ser Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Uma Shankar Cc: Ville Syrjälä Cc: Joshua Ashton Cc: Jani Nikula Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_atomic.c | 1 + drivers/gpu/drm/drm_connector.c | 15 +++++++++++++++ include/drm/drm_connector.h | 1 + 3 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index b4c6ffc438da..2c454568a607 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1131,6 +1131,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, drm_printf(p, "\tcrtc=%s\n", state->crtc ? 
state->crtc->name : "(null)"); drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc); + drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace)); if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) if (state->writeback_job && state->writeback_job->fb) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index b823b10ed697..e1d7a40e1776 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1087,6 +1087,21 @@ static const char * const colorspace_names[] = { [DRM_MODE_COLORIMETRY_BT601_YCC] = "BT601_YCC", }; +/** + * drm_get_colorspace_name - return a string for color encoding + * @colorspace: color space to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. + */ +const char *drm_get_colorspace_name(enum drm_colorspace colorspace) +{ + if (colorspace < ARRAY_SIZE(colorspace_names) && colorspace_names[colorspace]) + return colorspace_names[colorspace]; + else + return "(null)"; +} + static const u32 hdmi_colorspaces = BIT(DRM_MODE_COLORIMETRY_SMPTE_170M_YCC) | BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 6eb8e2d5d20f..880220321867 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -2078,6 +2078,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); bool drm_connector_has_possible_encoder(struct drm_connector *connector, struct drm_encoder *encoder); +const char *drm_get_colorspace_name(enum drm_colorspace colorspace); /** * drm_for_each_connector_iter - connector_list iterator macro From c265f340eaa87aa5f979adfb23d7463af67b7f27 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 29 Nov 2022 15:16:31 -0500 Subject: [PATCH 835/861] drm/connector: Allow drivers to pass list of supported colorspaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drivers might not support all colorspaces defined in dp_colorspaces and hdmi_colorspaces. This results in undefined behavior when userspace is setting an unsupported colorspace. Allow drivers to pass the list of supported colorspaces when creating the colorspace property. 
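For example, a driver that can only output BT.709 YCC and the BT.2020 variants might register the property like this (an illustrative sketch, not taken from any real driver; the mask name is hypothetical):

	static const u32 my_supported_colorspaces =
		BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
		BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
		BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);

	/* Only bits present in both this mask and the DP-defined set
	 * become enum values on the resulting "Colorspace" property;
	 * DRM_MODE_COLORIMETRY_DEFAULT is always added by the core. */
	ret = drm_mode_create_dp_colorspace_property(connector,
						     my_supported_colorspaces);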
v2: - Use 0 to indicate support for all colorspaces (Jani) - Print drm_dbg_kms message when drivers pass 0 to signal that drivers should specify supported colorspaces explicitly (Jani) v3: - Move changes to create a common colorspace_names array to separate patch v6: - Avoid magic when passing 0 for supported_colorspaces; be explicit in treating it as "all DP/HDMI" Signed-off-by: Harry Wentland Reviewed-by: Sebastian Wick Reviewed-by: Joshua Ashton Reviewed-by: Simon Ser Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Uma Shankar Cc: Ville Syrjälä Cc: Joshua Ashton Cc: Jani Nikula Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_connector.c | 24 +++++++++++++++---- .../gpu/drm/i915/display/intel_connector.c | 4 ++-- drivers/gpu/drm/vc4/vc4_hdmi.c | 2 +- include/drm/drm_connector.h | 7 ++++-- 4 files changed, 28 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index e1d7a40e1776..3ed4cfcb350c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2210,9 +2210,17 @@ static int drm_mode_create_colorspace_property(struct drm_connector *connector, * Returns: * Zero on success, negative errno on failure. */ -int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector) +int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector, + u32 supported_colorspaces) { - return drm_mode_create_colorspace_property(connector, hdmi_colorspaces); + u32 colorspaces; + + if (supported_colorspaces) + colorspaces = supported_colorspaces & hdmi_colorspaces; + else + colorspaces = hdmi_colorspaces; + + return drm_mode_create_colorspace_property(connector, colorspaces); } EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property); @@ -2226,9 +2234,17 @@ EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property); * Returns: * Zero on success, negative errno on failure. 
*/ -int drm_mode_create_dp_colorspace_property(struct drm_connector *connector) +int drm_mode_create_dp_colorspace_property(struct drm_connector *connector, + u32 supported_colorspaces) { - return drm_mode_create_colorspace_property(connector, dp_colorspaces); + u32 colorspaces; + + if (supported_colorspaces) + colorspaces = supported_colorspaces & dp_colorspaces; + else + colorspaces = dp_colorspaces; + + return drm_mode_create_colorspace_property(connector, colorspaces); } EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property); diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 257afac34839..3abfe29d0b44 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -280,14 +280,14 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector) void intel_attach_hdmi_colorspace_property(struct drm_connector *connector) { - if (!drm_mode_create_hdmi_colorspace_property(connector)) + if (!drm_mode_create_hdmi_colorspace_property(connector, 0)) drm_connector_attach_colorspace_property(connector); } void intel_attach_dp_colorspace_property(struct drm_connector *connector) { - if (!drm_mode_create_dp_colorspace_property(connector)) + if (!drm_mode_create_dp_colorspace_property(connector, 0)) drm_connector_attach_colorspace_property(connector); } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 06713d8b82b5..482397d5cb48 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -631,7 +631,7 @@ static int vc4_hdmi_connector_init(struct drm_device *dev, if (ret) return ret; - ret = drm_mode_create_hdmi_colorspace_property(connector); + ret = drm_mode_create_hdmi_colorspace_property(connector, 0); if (ret) return ret; diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 880220321867..3cea00346205 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -30,6 +30,7 @@ #include #include #include +#include #include @@ -1994,8 +1995,10 @@ int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *conn bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state, struct drm_connector_state *new_state); int drm_mode_create_aspect_ratio_property(struct drm_device *dev); -int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector); -int drm_mode_create_dp_colorspace_property(struct drm_connector *connector); +int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector, + u32 supported_colorspaces); +int drm_mode_create_dp_colorspace_property(struct drm_connector *connector, + u32 supported_colorspaces); int drm_mode_create_content_type_property(struct drm_device *dev); int drm_mode_create_suggested_offset_properties(struct drm_device *dev); From cb841d27b8767fd88096d06186b5f5de990fd6d0 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 1 Apr 2022 13:45:29 -0400 Subject: [PATCH 836/861] drm/amd/display: Always pass connector_state to stream validation We need the connector_state for colorspace and scaling information and can get it from connector->state. 
Signed-off-by: Harry Wentland Reviewed-by: Joshua Ashton Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Joshua Ashton Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d0532d7e8341..dd66455a8f36 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5955,15 +5955,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, { struct drm_display_mode *preferred_mode = NULL; struct drm_connector *drm_connector; - const struct drm_connector_state *con_state = - dm_state ? &dm_state->base : NULL; + const struct drm_connector_state *con_state = &dm_state->base; struct dc_stream_state *stream = NULL; struct drm_display_mode mode; struct drm_display_mode saved_mode; struct drm_display_mode *freesync_mode = NULL; bool native_mode_found = false; bool recalculate_timing = false; - bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; + bool scale = dm_state->scaling != RMX_OFF; int mode_refresh; int preferred_refresh = 0; enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; @@ -6605,7 +6604,9 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec goto fail; } - stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL); + stream = create_validate_stream_for_sink(aconnector, mode, + to_dm_connector_state(connector->state), + NULL); if (stream) { dc_stream_release(stream); result = MODE_OK; From 15f9dfd545a1edd604648961feadce16791d0f4f Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 25 Mar 2022 15:30:28 -0400 Subject: [PATCH 837/861] drm/amd/display: Register Colorspace property for DP and HDMI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want compositors to be able to set the output colorspace on DP and HDMI outputs, based on the caps reported from the receiver via EDID. 
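Once the property is attached, a compositor can program it like any other connector property. A minimal userspace sketch using libdrm (the property id and the enum value for the desired entry must first be discovered at runtime, e.g. via drmModeObjectGetProperties(); fd, conn_id, prop_id and value are placeholders):

	#include <xf86drmMode.h>

	/* Request a new output colorspace; takes effect on the next
	 * commit that the driver validates. */
	drmModeObjectSetProperty(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR,
				 prop_id, value);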
Signed-off-by: Harry Wentland Reviewed-by: Joshua Ashton Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Joshua Ashton Cc: Simon Ser Cc: Ville Syrjälä Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dd66455a8f36..8e2090280f44 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7247,6 +7247,12 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) return amdgpu_dm_connector->num_modes; } +static const u32 supported_colorspaces = + BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | + BIT(DRM_MODE_COLORIMETRY_OPRGB) | + BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | + BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); + void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, int connector_type, @@ -7327,6 +7333,15 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, adev->mode_info.abm_level_property, 0); } + if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) + drm_connector_attach_colorspace_property(&aconnector->base); + } else if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) { + if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) + drm_connector_attach_colorspace_property(&aconnector->base); + } + if (connector_type == DRM_MODE_CONNECTOR_HDMIA || connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) { From a0b433c858ac1d2e03cbfd5bb34b9b61906600eb Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 29 Mar 2022 11:26:23 -0400 Subject: [PATCH 838/861] drm/amd/display: Signal mode_changed if colorspace changed We need to signal mode_changed to make sure we update the output colorspace. v2: No need to call drm_hdmi_avi_infoframe_colorimetry as DC does its own infoframe packing. Signed-off-by: Harry Wentland Reviewed-by: Leo Li Reviewed-by: Joshua Ashton Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Uma Shankar Cc: Joshua Ashton Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8e2090280f44..49be49c85c50 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6700,6 +6700,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, if (!crtc) return 0; + if (new_con_state->colorspace != old_con_state->colorspace) { + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + new_crtc_state->mode_changed = true; + } + if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { struct dc_info_packet hdr_infopacket; @@ -6722,7 +6730,7 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, * set is permissible, however. 
So only force a * modeset if we're entering or exiting HDR. */ - new_crtc_state->mode_changed = + new_crtc_state->mode_changed = new_crtc_state->mode_changed || !old_con_state->hdr_output_metadata || !new_con_state->hdr_output_metadata; } From 2e656827ceed9fb1ba406e7cd11d7b572010add0 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 29 Mar 2022 15:30:05 -0400 Subject: [PATCH 839/861] drm/amd/display: Send correct DP colorspace infopacket Look at connector->colorimetry to determine output colorspace. We don't want to impact current SDR behavior, so DRM_MODE_COLORIMETRY_DEFAULT preserves current behavior. Also add support to explicitly set BT601 and BT709. v4: - Roll support for BT709 and BT601 into this patch - Add default case to avoid warnings for unhandled enum values Signed-off-by: Harry Wentland Reviewed-by: Joshua Ashton Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Joshua Ashton Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 48 ++++++++++++------- 1 file changed, 31 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 49be49c85c50..67dad847fd04 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5339,21 +5339,44 @@ get_aspect_ratio(const struct drm_display_mode *mode_in) } static enum dc_color_space -get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) +get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, + const struct drm_connector_state *connector_state) { enum dc_color_space color_space = COLOR_SPACE_SRGB; - switch (dc_crtc_timing->pixel_encoding) { - case PIXEL_ENCODING_YCBCR422: - case PIXEL_ENCODING_YCBCR444: - case PIXEL_ENCODING_YCBCR420: - { + switch (connector_state->colorspace) { + case DRM_MODE_COLORIMETRY_BT601_YCC: + if (dc_crtc_timing->flags.Y_ONLY) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + else + color_space = COLOR_SPACE_YCBCR601; + break; + case DRM_MODE_COLORIMETRY_BT709_YCC: + if (dc_crtc_timing->flags.Y_ONLY) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + else + color_space = COLOR_SPACE_YCBCR709; + break; + case DRM_MODE_COLORIMETRY_OPRGB: + color_space = COLOR_SPACE_ADOBERGB; + break; + case DRM_MODE_COLORIMETRY_BT2020_RGB: + case DRM_MODE_COLORIMETRY_BT2020_YCC: + if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) + color_space = COLOR_SPACE_2020_RGB_FULLRANGE; + else + color_space = COLOR_SPACE_2020_YCBCR; + break; + case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 + default: + if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) { + color_space = COLOR_SPACE_SRGB; /* * 27030khz is the separation point between HDTV and SDTV * according to HDMI spec, we use YCbCr709 and YCbCr601 * respectively */ - if (dc_crtc_timing->pix_clk_100hz > 270300) { + } else if (dc_crtc_timing->pix_clk_100hz > 270300) { if (dc_crtc_timing->flags.Y_ONLY) color_space = COLOR_SPACE_YCBCR709_LIMITED; @@ -5366,15 +5389,6 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) else color_space = COLOR_SPACE_YCBCR601; } - - } - break; - case PIXEL_ENCODING_RGB: - color_space = COLOR_SPACE_SRGB; - break; - - default: - WARN_ON(1); break; } @@ -5513,7 +5527,7 @@ static void fill_stream_properties_from_drm_display_mode( } } - stream->output_color_space = get_output_color_space(timing_out); + 
stream->output_color_space = get_output_color_space(timing_out, connector_state); } static void fill_audio_info(struct audio_info *audio_info, From bd49f19039c1806cd10cff8aaec7f90ebf28f0e9 Mon Sep 17 00:00:00 2001 From: Joshua Ashton Date: Mon, 14 Nov 2022 19:52:30 +0000 Subject: [PATCH 840/861] drm/amd/display: Always set crtcinfo from create_stream_for_sink Given that we always pass dm_state into here now, this won't ever trigger anymore. This is needed for we will always fail mode validation with invalid clocks or link bandwidth errors. Signed-off-by: Joshua Ashton Signed-off-by: Harry Wentland Reviewed-by: Harry Wentland Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Joshua Ashton Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 67dad847fd04..a6bc2c6cad5c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6055,7 +6055,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (recalculate_timing) drm_mode_set_crtcinfo(&saved_mode, 0); - else if (!dm_state) + else drm_mode_set_crtcinfo(&mode, 0); /* From fd45b6540f513887c172e2082d437209fe8f4a54 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 29 Nov 2022 17:24:52 -0500 Subject: [PATCH 841/861] drm/amd/display: Add debugfs for testing output colorspace In order to IGT test colorspace we'll want to print the currently enabled colorspace on a stream. We add a new debugfs to do so, using the same scheme as current bpc reporting. This might also come in handy when debugging display issues. v4: - Fix function doc comment - Fix sRGB debug print Signed-off-by: Harry Wentland Reviewed-by: Joshua Ashton Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Joshua Ashton Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 82234397dd44..caf13b2e8cb6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -906,6 +906,61 @@ unlock: } DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc); +/* + * Returns the current colorspace for the crtc. 
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_colorspace + */ +static int amdgpu_current_colorspace_show(struct seq_file *m, void *data) +{ + struct drm_crtc *crtc = m->private; + struct drm_device *dev = crtc->dev; + struct dm_crtc_state *dm_crtc_state = NULL; + int res = -ENODEV; + + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state == NULL) + goto unlock; + + dm_crtc_state = to_dm_crtc_state(crtc->state); + if (dm_crtc_state->stream == NULL) + goto unlock; + + switch (dm_crtc_state->stream->output_color_space) { + case COLOR_SPACE_SRGB: + seq_printf(m, "sRGB"); + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: + seq_printf(m, "BT601_YCC"); + break; + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: + seq_printf(m, "BT709_YCC"); + break; + case COLOR_SPACE_ADOBERGB: + seq_printf(m, "opRGB"); + break; + case COLOR_SPACE_2020_RGB_FULLRANGE: + seq_printf(m, "BT2020_RGB"); + break; + case COLOR_SPACE_2020_YCBCR: + seq_printf(m, "BT2020_YCC"); + break; + default: + goto unlock; + } + res = 0; + +unlock: + drm_modeset_unlock(&crtc->mutex); + mutex_unlock(&dev->mode_config.mutex); + + return res; +} +DEFINE_SHOW_ATTRIBUTE(amdgpu_current_colorspace); + + /* * Example usage: * Disable dsc passthrough, i.e.,: have dsc decoding at converver, not external RX @@ -3139,6 +3194,8 @@ void crtc_debugfs_init(struct drm_crtc *crtc) #endif debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry, crtc, &amdgpu_current_bpc_fops); + debugfs_create_file("amdgpu_current_colorspace", 0644, crtc->debugfs_entry, + crtc, &amdgpu_current_colorspace_fops); } /* From eaa7d8301109092670c5cf3e12c502618d6adc51 Mon Sep 17 00:00:00 2001 From: Joshua Ashton Date: Tue, 10 Jan 2023 19:14:06 +0000 Subject: [PATCH 842/861] drm/amd/display: Refactor avi_info_frame colorimetry determination Replace the messy two if-else chains here that were on the same value with a switch on the enum. 
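A side benefit of the switch form, beyond readability: with -Wswitch (implied by -Wall) the compiler can flag enumerators that gain no case when the enum later grows, provided no default label swallows them. A generic sketch of that pattern (the actual hunk below keeps a default case, trading that warning for tolerance of unlisted values):

	switch (color_space) {
	case COLOR_SPACE_YCBCR709:
	case COLOR_SPACE_YCBCR709_LIMITED:
		hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
		break;
	/* ... one case block per remaining enumerator, no default:
	 * the compiler then warns on any unhandled colorspace ... */
	}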
Signed-off-by: Joshua Ashton Signed-off-by: Harry Wentland Reviewed-by: Harry Wentland Cc: Pekka Paalanen Cc: Sebastian Wick Cc: Vitaly.Prosyak@amd.com Cc: Joshua Ashton Cc: Simon Ser Cc: Melissa Wen Cc: dri-devel@lists.freedesktop.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/core/dc_resource.c | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c72540d37aef..2f3d9a698486 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -3035,23 +3035,29 @@ static void set_avi_info_frame( hdmi_info.bits.S0_S1 = scan_type; /* C0, C1 : Colorimetry */ - if (color_space == COLOR_SPACE_YCBCR709 || - color_space == COLOR_SPACE_YCBCR709_LIMITED) + switch (color_space) { + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; - else if (color_space == COLOR_SPACE_YCBCR601 || - color_space == COLOR_SPACE_YCBCR601_LIMITED) + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601; - else { - hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA; - } - if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE || - color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE || - color_space == COLOR_SPACE_2020_YCBCR) { + break; + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_2020_YCBCR: hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR; hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; - } else if (color_space == COLOR_SPACE_ADOBERGB) { + break; + case COLOR_SPACE_ADOBERGB: hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB; hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; + break; + case COLOR_SPACE_SRGB: + default: + hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA; + break; } if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR && From 5daff15cd013422bc6d1efcfe82b586800025384 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Sat, 6 May 2023 12:01:33 +0800 Subject: [PATCH 843/861] drm/amdgpu: unmap and remove csa_va properly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The root PD BO should be reserved before unmapping and removing a bo_va from the VM, otherwise lockdep will complain. 
v2: check fpriv->csa_va is not NULL instead of amdgpu_mcbp (christian) [14616.936827] WARNING: CPU: 6 PID: 1711 at drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:1762 amdgpu_vm_bo_del+0x399/0x3f0 [amdgpu] [14616.937096] Call Trace: [14616.937097] [14616.937102] amdgpu_driver_postclose_kms+0x249/0x2f0 [amdgpu] [14616.937187] drm_file_free+0x1d6/0x300 [drm] [14616.937207] drm_close_helper.isra.0+0x62/0x70 [drm] [14616.937220] drm_release+0x5e/0x100 [drm] [14616.937234] __fput+0x9f/0x280 [14616.937239] ____fput+0xe/0x20 [14616.937241] task_work_run+0x61/0x90 [14616.937246] exit_to_user_mode_prepare+0x215/0x220 [14616.937251] syscall_exit_to_user_mode+0x2a/0x60 [14616.937254] do_syscall_64+0x48/0x90 [14616.937257] entry_SYSCALL_64_after_hwframe+0x63/0xcd Signed-off-by: Lang Yu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 38 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10 +++---- 3 files changed, 46 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index c6d4d41c4393..23d054526e7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -106,3 +106,41 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, ttm_eu_backoff_reservation(&ticket, &list); return 0; } + +int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va, + uint64_t csa_addr) +{ + struct ww_acquire_ctx ticket; + struct list_head list; + struct amdgpu_bo_list_entry pd; + struct ttm_validate_buffer csa_tv; + int r; + + INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&csa_tv.head); + csa_tv.bo = &bo->tbo; + csa_tv.num_shared = 1; + + list_add(&csa_tv.head, &list); + amdgpu_vm_get_pd_bo(vm, &list, &pd); + + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); + if (r) { + DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); + return r; + } + + r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr); + if (r) { + DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r); + ttm_eu_backoff_reservation(&ticket, &list); + return r; + } + + amdgpu_vm_bo_del(adev, bo_va); + + ttm_eu_backoff_reservation(&ticket, &list); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h index 524b4437a021..7dfc1f2012eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h @@ -34,6 +34,9 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size); +int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va, + uint64_t csa_addr); void amdgpu_free_static_csa(struct amdgpu_bo **bo); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 41d047e5de69..e3531aa3c8bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1311,12 +1311,12 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL) amdgpu_vce_free_handles(adev, file_priv); - if (amdgpu_mcbp) { - /* TODO: how to handle reserve failure */ - 
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true)); - amdgpu_vm_bo_del(adev, fpriv->csa_va); + if (fpriv->csa_va) { + uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK; + + WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj, + fpriv->csa_va, csa_addr)); fpriv->csa_va = NULL; - amdgpu_bo_unreserve(adev->virt.csa_obj); } pasid = fpriv->vm.pasid; From 731b48463b0d96eda1f1684eacde6e9c8065df83 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 5 Jun 2023 11:39:30 +0800 Subject: [PATCH 844/861] drm/amdgpu: disable virtual display support on APP device Virtual display is not supported on APP devices. Signed-off-by: Yang Wang Signed-off-by: Gavin Wan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 2c1fbed24535..0f1ca0136f50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -56,7 +56,8 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) /* enable virtual display */ if (adev->asic_type != CHIP_ALDEBARAN && - adev->asic_type != CHIP_ARCTURUS) { + adev->asic_type != CHIP_ARCTURUS && + ((adev->pdev->class >> 8) != AMD_ACCELERATOR_PROCESSING)) { if (adev->mode_info.num_crtc == 0) adev->mode_info.num_crtc = 1; adev->enable_virtual_display = true; From cab69d36ccdbfa3fa0b5627a032150369c20b4f3 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 6 Jun 2023 14:42:39 +0800 Subject: [PATCH 845/861] drm/amdgpu: skip to resume rlcg for gc 9.4.3 in vf side Skip resuming RLCG, because RLCG is already enabled on the PF side. Signed-off-by: Yang Wang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index ebdd7fa985d6..f5b8d3f388ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1293,6 +1293,9 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) { int r, i, num_xcc; + if (amdgpu_sriov_vf(adev)) + return 0; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { r = gfx_v9_4_3_xcc_rlc_resume(adev, i); @@ -4321,11 +4324,13 @@ static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask) for_each_inst(i, tmp_mask) gfx_v9_4_3_xcc_constants_init(adev, i); - tmp_mask = inst_mask; - for_each_inst(i, tmp_mask) { - r = gfx_v9_4_3_xcc_rlc_resume(adev, i); - if (r) - return r; + if (!amdgpu_sriov_vf(adev)) { + tmp_mask = inst_mask; + for_each_inst(i, tmp_mask) { + r = gfx_v9_4_3_xcc_rlc_resume(adev, i); + if (r) + return r; + } } tmp_mask = inst_mask; From 27d196c4491458ca00014cfe1cfa9d0fa87a2ff9 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Wed, 7 Jun 2023 11:24:48 +0800 Subject: [PATCH 846/861] drm/amd/pm: fix vclk setting failed for SMU v13.0.4 PMFW uses an argument left-shifted by 16 bits to set the VCLK DPM frequency for SMU v13.0.4. 
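Concretely, with the shift applied a request for an 800 MHz VCLK floor goes out as 800 << 16 (a worked illustration of the fix below; 800 is an arbitrary example value):

	#define SMU_13_VCLK_SHIFT 16

	uint32_t min = 800;                       /* requested VCLK in MHz */
	uint32_t arg = min << SMU_13_VCLK_SHIFT;  /* 0x03200000 sent to PMFW */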
Signed-off-by: Tim Huang Acked-by: Alex Deucher Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2 ++ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 11 +++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 5a99a091965e..6a0ac0bbaace 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -51,6 +51,8 @@ #define CTF_OFFSET_HOTSPOT 5 #define CTF_OFFSET_MEM 5 +#define SMU_13_VCLK_SHIFT 16 + extern const int pmfw_decoded_link_speed[5]; extern const int pmfw_decoded_link_width[7]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 46a8a366f287..999b07db862e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -831,6 +831,8 @@ static int smu_v13_0_4_set_soft_freq_limited_range(struct smu_context *smu, uint32_t max) { enum smu_message_type msg_set_min, msg_set_max; + uint32_t min_clk = min; + uint32_t max_clk = max; int ret = 0; if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) @@ -859,12 +861,17 @@ static int smu_v13_0_4_set_soft_freq_limited_range(struct smu_context *smu, return -EINVAL; } - ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); + if (clk_type == SMU_VCLK) { + min_clk = min << SMU_13_VCLK_SHIFT; + max_clk = max << SMU_13_VCLK_SHIFT; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL); if (ret) return ret; return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, - max, NULL); + max_clk, NULL); } static int smu_v13_0_4_force_clk_levels(struct smu_context *smu, From 2d0ee64e9846ed4036fd11c5b900a21039ee8b7a Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Fri, 2 Jun 2023 16:57:12 +0800 Subject: [PATCH 847/861] drm/amd/pm: enable vclk and dclk Pstates for SMU v13.0.4 Add the ability to control the vclk and dclk frequencies through the power_dpm_force_performance_level interface. 
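From userspace the interface is driven by writing a level name into sysfs; a minimal sketch in C (the card0 path assumes the first GPU):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/class/drm/card0/device/power_dpm_force_performance_level",
			      O_WRONLY);
		if (fd < 0)
			return 1;
		/* "low" pins sclk/fclk/socclk -- and, with this patch,
		 * vclk/dclk -- to their minimum DPM levels */
		write(fd, "low", 3);
		close(fd);
		return 0;
	}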
Signed-off-by: Tim Huang Acked-by: Alex Deucher Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 999b07db862e..315a6d8bde2e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -915,6 +915,8 @@ static int smu_v13_0_4_set_performance_level(struct smu_context *smu, uint32_t sclk_min = 0, sclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; uint32_t socclk_min = 0, socclk_max = 0; + uint32_t vclk_min = 0, vclk_max = 0; + uint32_t dclk_min = 0, dclk_max = 0; int ret = 0; switch (level) { @@ -922,22 +924,32 @@ static int smu_v13_0_4_set_performance_level(struct smu_context *smu, smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max); + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max); sclk_min = sclk_max; fclk_min = fclk_max; socclk_min = socclk_max; + vclk_min = vclk_max; + dclk_min = dclk_max; break; case AMD_DPM_FORCED_LEVEL_LOW: smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL); + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL); sclk_max = sclk_min; fclk_max = fclk_min; socclk_max = socclk_min; + vclk_max = vclk_min; + dclk_max = dclk_min; break; case AMD_DPM_FORCED_LEVEL_AUTO: smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max); + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: @@ -983,6 +995,23 @@ static int smu_v13_0_4_set_performance_level(struct smu_context *smu, return ret; } + if (vclk_min && vclk_max) { + ret = smu_v13_0_4_set_soft_freq_limited_range(smu, + SMU_VCLK, + vclk_min, + vclk_max); + if (ret) + return ret; + } + + if (dclk_min && dclk_max) { + ret = smu_v13_0_4_set_soft_freq_limited_range(smu, + SMU_DCLK, + dclk_min, + dclk_max); + if (ret) + return ret; + } return ret; } From 55682a893844cc64e3a85806b0c3ca7a77b905c3 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Mon, 5 Jun 2023 16:57:45 +0800 Subject: [PATCH 848/861] drm/amd/pm: enable more Pstates profile levels for SMU v13.0.4 This patch enables the following UMD stable Pstate profile levels for the power_dpm_force_performance_level interface. 
- profile_peak - profile_min_mclk - profile_min_sclk - profile_standard Signed-off-by: Tim Huang Acked-by: Alex Deucher Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 54 ++++++++++++++++++- 1 file changed, 53 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 315a6d8bde2e..ef37dda9908f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -54,6 +54,10 @@ #define FEATURE_MASK(feature) (1ULL << feature) +#define SMU_13_0_4_UMD_PSTATE_GFXCLK 938 +#define SMU_13_0_4_UMD_PSTATE_SOCCLK 938 +#define SMU_13_0_4_UMD_PSTATE_FCLK 1875 + #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ @@ -908,6 +912,50 @@ static int smu_v13_0_4_force_clk_levels(struct smu_context *smu, return ret; } +static int smu_v13_0_4_get_dpm_profile_freq(struct smu_context *smu, + enum amd_dpm_forced_level level, + enum smu_clk_type clk_type, + uint32_t *min_clk, + uint32_t *max_clk) +{ + int ret = 0; + uint32_t clk_limit = 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + clk_limit = SMU_13_0_4_UMD_PSTATE_GFXCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL); + break; + case SMU_SOCCLK: + clk_limit = SMU_13_0_4_UMD_PSTATE_SOCCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit); + break; + case SMU_FCLK: + clk_limit = SMU_13_0_4_UMD_PSTATE_FCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL); + break; + case SMU_VCLK: + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit); + break; + case SMU_DCLK: + smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit); + break; + default: + ret = -EINVAL; + break; + } + *min_clk = *max_clk = clk_limit; + return ret; +} + static int smu_v13_0_4_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { @@ -955,7 +1003,11 @@ static int smu_v13_0_4_set_performance_level(struct smu_context *smu, case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - /* Temporarily do nothing since the optimal clocks haven't been provided yet */ + smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max); + smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max); + smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max); + smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: From 3537d6a48c50ed37e419f89931a5acdb6c56c6d6 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 7 Jun 2023 01:41:22 -0500 Subject: [PATCH 849/861] drm/amd: Make sure image is written to trigger VBIOS image update flow The VBIOS image update flow requires userspace to: 1) Write the image to 
`psp_vbflash` 2) Read `psp_vbflash` 3) Poll `psp_vbflash_status` to check for completion. If userspace reads `psp_vbflash` before writing an image, it's possible that it causes problems that can put the dGPU into an invalid state. Explicitly check that an image has been written before letting a read succeed. Cc: stable@vger.kernel.org Fixes: 8424f2ccb3c0 ("drm/amdgpu/psp: Add vbflash sysfs interface support") Reviewed-by: Alex Deucher Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 27559a05a67b..8c60db176119 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -3617,6 +3617,9 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, void *fw_pri_cpu_addr; int ret; + if (adev->psp.vbflash_image_size == 0) + return -EINVAL; + dev_info(adev->dev, "VBIOS flash to PSP started"); ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, From fe56c6ee0457035ae8fbbc2aa5ddfcfac6ded787 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 7 Jun 2023 01:45:20 -0500 Subject: [PATCH 850/861] drm/amd: Tighten permissions on VBIOS flashing attributes Non-root users shouldn't be able to try to trigger a VBIOS flash or query the flashing status. This should be reserved for users with the appropriate permissions. Cc: stable@vger.kernel.org Fixes: 8424f2ccb3c0 ("drm/amdgpu/psp: Add vbflash sysfs interface support") Reviewed-by: Alex Deucher Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8c60db176119..dd865beb39a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -3671,13 +3671,13 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev, } static const struct bin_attribute psp_vbflash_bin_attr = { - .attr = {.name = "psp_vbflash", .mode = 0664}, + .attr = {.name = "psp_vbflash", .mode = 0660}, .size = 0, .write = amdgpu_psp_vbflash_write, .read = amdgpu_psp_vbflash_read, }; -static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL); +static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) { From 09d49e14ea6fd125a21f89b80f888c09be32a174 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 23 May 2023 11:57:27 -0400 Subject: [PATCH 851/861] drm/amdkfd: fix and enable debugging for gfx11 There are a couple of fixes required to enable gfx11 debugging. First, ADD_QUEUE.trap_en is an inappropriate place to toggle a per-process register, so move it to SET_SHADER_DEBUGGER.trap_en. When ADD_QUEUE.skip_process_ctx_clear is set, MES will prioritize the SET_SHADER_DEBUGGER.trap_en setting. Second, to preserve correct save/restore of privileged wave states in coordination with the trap enablement setting, resume suspended waves early in the disable call. 
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 7 ++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 4 +++- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 14 ++++++-------- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 3 +-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 12 +++++++----- drivers/gpu/drm/amd/include/mes_v11_api_def.h | 1 + 7 files changed, 25 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 20cc3fffe921..e9091ebfe230 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -928,7 +928,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, const uint32_t *tcp_watch_cntl, - uint32_t flags) + uint32_t flags, + bool trap_en) { struct mes_misc_op_input op_input = {0}; int r; @@ -945,6 +946,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl, sizeof(op_input.set_shader_debugger.tcp_watch_cntl)); + if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >> + AMDGPU_MES_API_VERSION_SHIFT) >= 14) + op_input.set_shader_debugger.trap_en = trap_en; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index b5f5eed2b5ef..2d6ac30b7135 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -294,6 +294,7 @@ struct mes_misc_op_input { } flags; uint32_t spi_gdbg_per_vmid_cntl; uint32_t tcp_watch_cntl[4]; + uint32_t trap_en; } set_shader_debugger; }; }; @@ -361,7 +362,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, const uint32_t *tcp_watch_cntl, - uint32_t flags); + uint32_t flags, + bool trap_en); int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, int queue_type, int idx, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index c4e3cb8d44de..1bdaa00c0b46 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -347,6 +347,7 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes, memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl, input->set_shader_debugger.tcp_watch_cntl, sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); + misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; break; default: DRM_ERROR("unsupported misc op (%d) \n", input->op); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 125274445f43..cd34e7aaead4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -349,12 +349,13 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) { uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; uint32_t flags = pdd->process->dbg_flags; + bool sq_trap_en = !!spi_dbg_cntl; if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) return 0; return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl, - pdd->watch_points, flags); + pdd->watch_points, flags, sq_trap_en); } #define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1 @@ -557,6 +558,10 @@ void 
kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind if (!unwind) { uint32_t flags = 0; + int resume_count = resume_queues(target, 0, NULL); + + if (resume_count) + pr_debug("Resumed %d queues\n", resume_count); cancel_work_sync(&target->debug_event_workarea); kfd_dbg_clear_process_address_watch(target); @@ -598,13 +603,6 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind } kfd_dbg_set_workaround(target, false); - - if (!unwind) { - int resume_count = resume_queues(target, 0, NULL); - - if (resume_count) - pr_debug("Resumed %d queues\n", resume_count); - } } static void kfd_dbg_clean_exception_status(struct kfd_process *target) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 498ad7d4e7d9..d6b15493fffd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -227,8 +227,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, queue_input.tba_addr = qpd->tba_addr; queue_input.tma_addr = qpd->tma_addr; queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || - KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0) || - q->properties.is_dbg_wa; + KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0); queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled; queue_type = convert_to_mes_queue_type(q->properties.type); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index faa7939f35bd..90b86a6ac7bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1863,13 +1863,15 @@ static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *de { bool firmware_supported = true; - /* - * FIXME: GFX11 FW currently not sufficient to deal with CWSR WA. - * Updated FW with API changes coming soon. - */ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) && KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) { - firmware_supported = false; + uint32_t mes_api_rev = (dev->gpu->adev->mes.sched_version & + AMDGPU_MES_API_VERSION_MASK) >> + AMDGPU_MES_API_VERSION_SHIFT; + uint32_t mes_rev = dev->gpu->adev->mes.sched_version & + AMDGPU_MES_VERSION_MASK; + + firmware_supported = (mes_api_rev >= 14) && (mes_rev >= 64); goto out; } diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index f3c15f18ddb5..0997e999416a 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -575,6 +575,7 @@ struct SET_SHADER_DEBUGGER { } flags; uint32_t spi_gdbg_per_vmid_cntl; uint32_t tcp_watch_cntl[4]; /* TCP_WATCHx_CNTL */ + uint32_t trap_en; }; union MESAPI__MISC { From 11b92df8a2f7f4605ccc764ce6ae4a72760674df Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Wed, 22 Mar 2023 14:29:26 -0400 Subject: [PATCH 852/861] Revert "Revert drm/amd/display: Enable Freesync Video Mode by default" This reverts commit 4243c84aa082d8fba70c45f48eb2bb5c19799060. Enables freesync video by default, since the hang and corruption issues on eDP panels are now fixed. 
Acked-by: Stylon Wang Signed-off-by: Aurabindo Pillai Reviewed-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a6bc2c6cad5c..e14704b764e0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6039,8 +6039,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { - recalculate_timing = amdgpu_freesync_vid_mode && - is_freesync_video_mode(&mode, aconnector); + recalculate_timing = is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); @@ -7233,7 +7232,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!(amdgpu_freesync_vid_mode && edid)) + if (!edid) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -9293,8 +9292,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. */ - if (amdgpu_freesync_vid_mode && - dm_new_crtc_state->stream && + if (dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; @@ -9336,7 +9334,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, } /* Now check if we should set freesync video mode */ - if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && + if (dm_new_crtc_state->stream && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && is_timing_unchanged_for_freesync(new_crtc_state, @@ -9350,7 +9348,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; - } else if (amdgpu_freesync_vid_mode && aconnector && + } else if (aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; From 9d65b1b4bcf3918164e17365eec169875eef8ee3 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Tue, 23 May 2023 12:02:32 +0800 Subject: [PATCH 853/861] drm/amdgpu: add the accelerator PCIe class Add the accelerator PCIe class and match the class in amdgpu for 0x1002 devices of that class. 
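The runtime checks rely on the layout of the PCI class word: pdev->class carries the 24-bit class code (base class, sub-class, prog-if), so shifting out the low prog-if byte leaves the 16-bit value the new define names. A small sketch of the test used in the hunks below:

	/* 0x1200 == base class 0x12 (processing accelerator), sub-class 0x00 */
	if ((pdev->class >> 8) == PCI_CLASS_ACCELERATOR_PROCESSING)
		; /* display-less accelerator: skip display-specific setup */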
From PCI spec: "PCI Code and ID Assignment, r1.9, sec 1, 1.19" Signed-off-by: Shiwu Zhang Acked-by: Lijo Lazar Acked-by: Bjorn Helgaas # pci_ids.h Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2 +- drivers/gpu/drm/amd/include/amd_shared.h | 1 - include/linux/pci_ids.h | 3 +++ 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b8a1e4571cd9..8e58d187b173 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2044,7 +2044,7 @@ static const struct pci_device_id pciidlist[] = { .driver_data = CHIP_IP_DISCOVERY }, { PCI_DEVICE(0x1002, PCI_ANY_ID), - .class = AMD_ACCELERATOR_PROCESSING << 8, + .class = PCI_CLASS_ACCELERATOR_PROCESSING << 8, .class_mask = 0xffffff, .driver_data = CHIP_IP_DISCOVERY }, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 0f1ca0136f50..25b4d7f0bd35 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -57,7 +57,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) /* enable virtual display */ if (adev->asic_type != CHIP_ALDEBARAN && adev->asic_type != CHIP_ARCTURUS && - ((adev->pdev->class >> 8) != AMD_ACCELERATOR_PROCESSING)) { + ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) { if (adev->mode_info.num_crtc == 0) adev->mode_info.num_crtc = 1; adev->enable_virtual_display = true; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 57d95e2cc54b..f175e65b853a 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -27,7 +27,6 @@ #define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */ -#define AMD_ACCELERATOR_PROCESSING 0x1200 /* hardcoded pci class */ /* * Chip flags diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 45c3d62e616d..0fbfbda3dc26 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -151,6 +151,9 @@ #define PCI_CLASS_SP_DPIO 0x1100 #define PCI_CLASS_SP_OTHER 0x1180 +#define PCI_BASE_CLASS_ACCELERATOR 0x12 +#define PCI_CLASS_ACCELERATOR_PROCESSING 0x1200 + #define PCI_CLASS_OTHERS 0xff /* Vendors and devices. Sort key: vendor first, device next. */ From 4e70da985cef954cdf7813d651c067d2c602ea71 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 8 Jun 2023 10:01:57 -0700 Subject: [PATCH 854/861] drm/amdgpu: Wrap -Wunused-but-set-variable in cc-option -Wunused-but-set-variable was only supported in clang starting with 13.0.0, so earlier versions will emit a warning, which is turned into a hard error for the kernel to mirror GCC: error: unknown warning option '-Wunused-but-set-variable'; did you mean '-Wunused-const-variable'? [-Werror,-Wunknown-warning-option] The minimum supported version of clang for building the kernel is 11.0.0, so match the rest of the kernel and wrap -Wunused-but-set-variable in a cc-option call, so that it is only used when supported by the compiler. 
Closes: https://github.com/ClangBuiltLinux/linux/issues/1869 Fixes: 1b320ad3f5a8 ("drm/amd/amdgpu: introduce DRM_AMDGPU_WERROR") Signed-off-by: Nathan Chancellor Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7ee68b1bbfed..86b833085f19 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -40,7 +40,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/amdkfd subdir-ccflags-y := -Wextra -subdir-ccflags-y += -Wunused-but-set-variable +subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable) subdir-ccflags-y += -Wno-unused-parameter subdir-ccflags-y += -Wno-type-limits subdir-ccflags-y += -Wno-sign-compare From 80e709ee6ecc9eba8bd8d188218472822e1b38bd Mon Sep 17 00:00:00 2001 From: Chong Li Date: Wed, 7 Jun 2023 15:56:12 +0800 Subject: [PATCH 855/861] drm/amdgpu: add option params to enforce process isolation between graphics and compute Enforce process isolation between graphics and compute by using the same reserved VMID. v2: remove params "struct amdgpu_vm *vm" from amdgpu_vmid_alloc_reserved and amdgpu_vmid_free_reserved. Signed-off-by: Chong Li Reviewed-by: Christian Koenig Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10 +++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 17 +++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 6 ++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 25 +++++++++++++++++-------- 5 files changed, 36 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 74104d46a7be..a84bd4a0c421 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -214,6 +214,7 @@ extern int amdgpu_force_asic_type; extern int amdgpu_smartshift_bias; extern int amdgpu_use_xgmi_p2p; extern int amdgpu_mtype_local; +extern bool enforce_isolation; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8e58d187b173..999d008b6b48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -153,7 +153,7 @@ uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu; char *amdgpu_virtual_display; - +bool enforce_isolation; /* * OverDrive(bit 14) disabled by default * GFX DCS(bit 19) disabled by default @@ -973,6 +973,14 @@ MODULE_PARM_DESC( 4 = AMDGPU_CPX_PARTITION_MODE)"); module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444); + +/** + * DOC: enforce_isolation (bool) + * enforce process isolation between graphics and compute by using the same reserved vmid. + */ +module_param(enforce_isolation, bool, 0444); +MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute. enforce_isolation = on"); + /* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index c991ca0b7a1c..ff1ea99292fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r || !idle) goto error; - if (vm->reserved_vmid[vmhub]) { + if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) { r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); if (r || !id) goto error; @@ -460,14 +460,11 @@ error: } int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, unsigned vmhub) { struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; mutex_lock(&id_mgr->lock); - if (vm->reserved_vmid[vmhub]) - goto unlock; ++id_mgr->reserved_use_count; if (!id_mgr->reserved) { @@ -479,27 +476,23 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, list_del_init(&id->list); id_mgr->reserved = id; } - vm->reserved_vmid[vmhub] = true; -unlock: mutex_unlock(&id_mgr->lock); return 0; } void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, unsigned vmhub) { struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; mutex_lock(&id_mgr->lock); - if (vm->reserved_vmid[vmhub] && - !--id_mgr->reserved_use_count) { + if (!--id_mgr->reserved_use_count) { /* give the reserved ID back to normal round robin */ list_add(&id_mgr->reserved->list, &id_mgr->ids_lru); id_mgr->reserved = NULL; } - vm->reserved_vmid[vmhub] = false; + mutex_unlock(&id_mgr->lock); } @@ -578,6 +571,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); } } + /* alloc a default reserved vmid to enforce isolation */ + if (enforce_isolation) + amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0)); + } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index d1cc09b45da4..fa8c42c83d5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -79,11 +79,9 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, struct amdgpu_vmid *id); int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - unsigned vmhub); + unsigned vmhub); void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - unsigned vmhub); + unsigned vmhub); int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence **fence); void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 76d57bc7ac62..dc80c9c8fd14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2284,8 +2284,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } dma_fence_put(vm->last_update); - for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) - amdgpu_vmid_free_reserved(adev, vm, i); + + for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) { + if (vm->reserved_vmid[i]) { + amdgpu_vmid_free_reserved(adev, i); + vm->reserved_vmid[i] = false; + } + } + } /** @@ -2368,7 +2374,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_vm *args = data; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv 
*fpriv = filp->driver_priv; - int r; /* No valid flags defined yet */ if (args->in.flags) @@ -2377,13 +2382,17 @@ switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ - r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, - AMDGPU_GFXHUB(0)); - if (r) - return r; + if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { + amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0)); + fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; + } + break; case AMDGPU_VM_OP_UNRESERVE_VMID: - amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB(0)); + if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { + amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0)); + fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; + } break; default: return -EINVAL; From 3ffb193969c57afd4096cfb107ca2cc3bb0c55d9 Mon Sep 17 00:00:00 2001 From: shikaguo Date: Fri, 9 Jun 2023 11:32:09 +0800 Subject: [PATCH 856/861] drm/amd/pm: enable more Pstates profile levels for yellow_carp This patch enables the following UMD stable Pstates profile levels for the power_dpm_force_performance_level interface. - profile_peak - profile_min_mclk - profile_min_sclk - profile_standard Signed-off-by: shikaguo Reviewed-by: Tim Huang Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 141 +++++++++++++++++- .../drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h | 1 - 2 files changed, 137 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index a92da336ecec..a1be2029ba4a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -47,6 +47,14 @@ #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1L +#define SMU_13_0_8_UMD_PSTATE_GFXCLK 533 +#define SMU_13_0_8_UMD_PSTATE_SOCCLK 533 +#define SMU_13_0_8_UMD_PSTATE_FCLK 800 + +#define SMU_13_0_1_UMD_PSTATE_GFXCLK 700 +#define SMU_13_0_1_UMD_PSTATE_SOCCLK 678 +#define SMU_13_0_1_UMD_PSTATE_FCLK 1800 + #define FEATURE_MASK(feature) (1ULL << feature) #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ @@ -957,6 +965,9 @@ static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, uint32_t max) { enum smu_message_type msg_set_min, msg_set_max; + uint32_t min_clk = min; + uint32_t max_clk = max; + int ret = 0; if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) @@ -985,11 +996,17 @@ static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, return -EINVAL; } - ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); + if (clk_type == SMU_VCLK) { + min_clk = min << SMU_13_VCLK_SHIFT; + max_clk = max << SMU_13_VCLK_SHIFT; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL); + if (ret) goto out; - ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max_clk, NULL); if (ret) goto out; @@ -997,12 +1014,49 @@ out: return ret; } +static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu, + enum smu_clk_type clk_type) +{ + uint32_t clk_limit = 0; + struct amdgpu_device *adev = smu->adev; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) + clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK; + if
((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || + (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) + clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK; + break; + case SMU_SOCCLK: + if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) + clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK; + if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || + (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) + clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK; + break; + case SMU_FCLK: + if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) + clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK; + if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || + (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) + clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK; + break; + default: + break; + } + + return clk_limit; +} + static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; + uint32_t clk_limit = 0; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1044,6 +1098,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, break; case SMU_GFXCLK: case SMU_SCLK: + clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type); ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); if (ret) goto print_clk_out; @@ -1058,7 +1113,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, i == 0 ? "*" : ""); size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", - i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK, + i == 1 ? cur_value : clk_limit, i == 1 ? "*" : ""); size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, i == 2 ? "*" : ""); @@ -1107,6 +1162,49 @@ force_level_out: return ret; } +static int yellow_carp_get_dpm_profile_freq(struct smu_context *smu, + enum amd_dpm_forced_level level, + enum smu_clk_type clk_type, + uint32_t *min_clk, + uint32_t *max_clk) +{ + int ret = 0; + uint32_t clk_limit = 0; + + clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type); + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) + yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL); + break; + case SMU_SOCCLK: + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit); + break; + case SMU_FCLK: + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) + yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL); + break; + case SMU_VCLK: + yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit); + break; + case SMU_DCLK: + yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit); + break; + default: + ret = -EINVAL; + break; + } + *min_clk = *max_clk = clk_limit; + return ret; +} + static int yellow_carp_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { @@ -1114,6 +1212,9 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, uint32_t sclk_min = 0, sclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; uint32_t socclk_min = 0, socclk_max = 0; + uint32_t vclk_min = 0, vclk_max = 0; + uint32_t dclk_min = 0, dclk_max = 0; + int 
ret = 0; switch (level) { @@ -1121,28 +1222,42 @@ yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); + yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max); + yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max); sclk_min = sclk_max; fclk_min = fclk_max; socclk_min = socclk_max; + vclk_min = vclk_max; + dclk_min = dclk_max; break; case AMD_DPM_FORCED_LEVEL_LOW: yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); + yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL); + yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL); sclk_max = sclk_min; fclk_max = fclk_min; socclk_max = socclk_min; + vclk_max = vclk_min; + dclk_max = dclk_min; break; case AMD_DPM_FORCED_LEVEL_AUTO: yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); + yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max); + yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - /* Temporarily do nothing since the optimal clocks haven't been provided yet */ + yellow_carp_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max); + yellow_carp_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max); + yellow_carp_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max); + yellow_carp_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max); + yellow_carp_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -1182,6 +1297,24 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, return ret; } + if (vclk_min && vclk_max) { + ret = yellow_carp_set_soft_freq_limited_range(smu, + SMU_VCLK, + vclk_min, + vclk_max); + if (ret) + return ret; + } + + if (dclk_min && dclk_max) { + ret = yellow_carp_set_soft_freq_limited_range(smu, + SMU_DCLK, + dclk_min, + dclk_max); + if (ret) + return ret; + } + return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h index a9205a8ea3ad..b3ad8352c68a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h @@ -24,6 +24,5 @@ #define __YELLOW_CARP_PPT_H__ extern void yellow_carp_set_ppt_funcs(struct smu_context *smu); -#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100 #endif From 389c6b3e120303715c018d1bfc7bab02b50ca3f4 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Fri, 19 May 2023 20:43:30 -0400 Subject: [PATCH 857/861] drm/amd/display: add NULL pointer check [Why] Check that dmub_srv exists before accessing DMUB.
Acked-by: Stylon Wang Signed-off-by: Charlene Liu Reviewed-by: Zhan Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 0d3ec50b1385..6e11d2b701f8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -276,8 +276,8 @@ static void program_cursor_attributes( } dc->hwss.set_cursor_attribute(pipe_ctx); - - dc_send_update_cursor_info_to_dmu(pipe_ctx, i); + if (dc->ctx->dmub_srv) + dc_send_update_cursor_info_to_dmu(pipe_ctx, i); if (dc->hwss.set_cursor_sdr_white_level) dc->hwss.set_cursor_sdr_white_level(pipe_ctx); } @@ -396,8 +396,8 @@ static void program_cursor_position( } dc->hwss.set_cursor_position(pipe_ctx); - - dc_send_update_cursor_info_to_dmu(pipe_ctx, i); + if (dc->ctx->dmub_srv) + dc_send_update_cursor_info_to_dmu(pipe_ctx, i); } if (pipe_to_program) From 57a8011512131c63cf700d42ef56ad875409a1a5 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Thu, 8 Jun 2023 22:07:11 +0800 Subject: [PATCH 858/861] drm/amd/pm: workaround for compute workload type on some skus On SMU 13.0.0, the compute workload type cannot be set on all SKUs due to other problems. This workaround makes sure that the compute workload type can also run on those specific SKUs. v2: keep the variable consistent Signed-off-by: Kenneth Feng Acked-by: Lijo Lazar Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 33 +++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 413e592f0ed6..4dd01b3f350f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2179,10 +2179,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, } } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && + (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) || + ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_COMPUTE_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } + + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_CUSTOM); + } else { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, smu->power_profile_mode); + } + if (workload_type < 0) return -EINVAL; From aeb3dd7e6f91da0a8c460d61fad13db85b3b33b7 Mon Sep 17 00:00:00 2001 From: Leo Ma Date: Thu, 18 May 2023 14:07:13 -0400 Subject: [PATCH 859/861] Revert "drm/amd/display: cache trace buffer size" Revert commit 257e9891db0b ("drm/amd/display: cache
trace buffer size") to fix regression found in tests. Acked-by: Stylon Wang Signed-off-by: Leo Ma Reviewed-by: Josip Pavic Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1 - drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 2 -- 2 files changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index e210cb082ebd..7c9a2b34bd05 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -428,7 +428,6 @@ struct dmub_srv { enum dmub_asic asic; void *user_ctx; uint32_t fw_version; - uint32_t trace_buffer_size; bool is_virtual; struct dmub_fb scratch_mem_fb; volatile const struct dmub_fw_state *fw_state; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 8b9af18e84fe..9e9a6a44a7ac 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -427,8 +427,6 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, dmub->fw_version = fw_info->fw_version; } - dmub->trace_buffer_size = trace_buffer_size; - trace_buff->base = dmub_align(mail->top, 256); trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); From 24e52fc20201c87912eee8f337829c036c3b0f3a Mon Sep 17 00:00:00 2001 From: Sridevi Date: Wed, 10 May 2023 14:34:55 -0400 Subject: [PATCH 860/861] drm/amd/display: DSC Programming Deltas [Why] Programming register delta for DSC sub-block [How] Change DSC, resource files for programming register delta. Acked-by: Stylon Wang Signed-off-by: Sridevi Reviewed-by: Chris Park Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 29 +++++++------------ .../gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 28 ++++++++++++++++++ 2 files changed, 38 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 5bd698cd6d20..5eebe7f03ddc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -30,22 +30,13 @@ #include "dsc/dscc_types.h" #include "dsc/rc_calc.h" -static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps); -static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, - struct dsc_optc_config *dsc_optc_cfg); -static void dsc_init_reg_values(struct dsc_reg_values *reg_vals); -static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params); static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals); -static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple); -static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth); /* Object I/F functions */ -static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); -static bool dsc2_get_packed_pps(struct 
display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps); static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe); static void dsc2_disable(struct display_stream_compressor *dsc); static void dsc2_disconnect(struct display_stream_compressor *dsc); @@ -108,7 +99,7 @@ void dsc2_construct(struct dcn20_dsc *dsc, /* This returns the capabilities for a single DSC encoder engine. Number of slices and total throughput * can be doubled, tripled etc. by using additional DSC engines. */ -static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) +void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ @@ -184,7 +175,7 @@ static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const st } -static void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config) +void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config) { DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h); DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v); @@ -211,7 +202,7 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct } -static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps) +bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps) { bool is_config_ok; struct dsc_reg_values dsc_reg_vals; @@ -291,7 +282,7 @@ static void dsc2_disconnect(struct display_stream_compressor *dsc) } /* This module's internal functions */ -static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps) +void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps) { int i; int bits_per_pixel = pps->bits_per_pixel; @@ -345,7 +336,7 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co } } -static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override) +void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override) { uint8_t i; @@ -372,7 +363,7 @@ static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_ rc->flatness_det_thresh = override->flatness_det_thresh; } -static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, +bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, struct dsc_optc_config *dsc_optc_cfg) { struct dsc_parameters dsc_params; @@ -463,7 +454,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_ } -static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple) +enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple) { enum dsc_pixel_format dsc_pix_fmt = DSC_PIXFMT_UNKNOWN; @@ -495,7 +486,7 @@ static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_p } -static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth) +enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth) { enum dsc_bits_per_comp bpc = DSC_BPC_UNKNOWN; @@ -518,7 
+509,7 @@ static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_co } -static void dsc_init_reg_values(struct dsc_reg_values *reg_vals) +void dsc_init_reg_values(struct dsc_reg_values *reg_vals) { int i; @@ -574,7 +565,7 @@ static void dsc_init_reg_values(struct dsc_reg_values *reg_vals) * This is required because dscc_compute_dsc_parameters returns a modified PPS, which in turn * affects non-PPS register values. */ -static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params) +void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index 7ce64a3c1b02..ba869387c3c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -549,6 +549,27 @@ struct dcn20_dsc { int max_image_width; }; +void dsc_config_log(struct display_stream_compressor *dsc, + const struct dsc_config *config); + +void dsc_log_pps(struct display_stream_compressor *dsc, + struct drm_dsc_config *pps); + +void dsc_override_rc_params(struct rc_params *rc, + const struct dc_dsc_rc_params_override *override); + +bool dsc_prepare_config(const struct dsc_config *dsc_cfg, + struct dsc_reg_values *dsc_reg_vals, + struct dsc_optc_config *dsc_optc_cfg); + +enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, + bool is_ycbcr422_simple); + +enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth); + +void dsc_init_reg_values(struct dsc_reg_values *reg_vals); + +void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params); void dsc2_construct(struct dcn20_dsc *dsc, struct dc_context *ctx, @@ -557,5 +578,12 @@ void dsc2_construct(struct dcn20_dsc *dsc, const struct dcn20_dsc_shift *dsc_shift, const struct dcn20_dsc_mask *dsc_mask); +void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, + int pixel_clock_100Hz); + +bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, + const struct dsc_config *dsc_cfg, + uint8_t *dsc_packed_pps); + #endif From 3b718dcaf163d17fe907ea098c8449e0cd6bc271 Mon Sep 17 00:00:00 2001 From: Austin Zheng Date: Wed, 24 May 2023 11:52:12 -0400 Subject: [PATCH 861/861] drm/amd/display: Filter out AC mode frequencies on DC mode systems Why: Limit maximum clock speeds to DC mode limits for DC mode systems How: Store DC mode limits when individual clocks are initialized and cap the values when building the clock table Acked-by: Stylon Wang Signed-off-by: Austin Zheng Reviewed-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 13 +- drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 140 ++++++++++++----- .../amd/display/dc/dml/dcn321/dcn321_fpu.c | 144 +++++++++++++----- .../gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 1 + 5 files changed, 216 insertions(+), 83 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 20bff6a346ba..96fa68f166e0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -182,23 +182,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) 
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz, &num_entries_per_clk->num_dcfclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK); /* SOCCLK */ dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK, &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz, &num_entries_per_clk->num_socclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK); /* DTBCLK */ - if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) + if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) { dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz, &num_entries_per_clk->num_dtbclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = + dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK); + } /* DISPCLK */ dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz, &num_entries_per_clk->num_dispclk_levels); num_levels = num_entries_per_clk->num_dispclk_levels; + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK); + //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x + if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950) + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950; if (num_entries_per_clk->num_dcfclk_levels && num_entries_per_clk->num_dtbclk_levels && @@ -817,6 +826,7 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_UCLK, &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz, &num_entries_per_clk->num_memclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); /* memclk must have at least one level */ num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? 
num_entries_per_clk->num_memclk_levels : 1; @@ -824,6 +834,7 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) dcn32_init_single_clock(clk_mgr, PPCLK_FCLK, &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz, &num_entries_per_clk->num_fclk_levels); + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK); if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) { num_levels = num_entries_per_clk->num_memclk_levels; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7ded574f84ff..360dd83b1a7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -897,6 +897,7 @@ struct dc_debug_options { uint32_t fpo_vactive_min_active_margin_us; uint32_t fpo_vactive_max_blank_us; bool enable_legacy_fast_update; + bool disable_dc_mode_overwrite; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index ad6ee48580f8..fa3678342abb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2325,14 +2325,48 @@ void dcn32_patch_dpm_table(struct clk_bw_params *bw_params) bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16; } -static int build_synthetic_soc_states(struct clk_bw_params *bw_params, +/* + * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings + * Input: + * max_clk_limit - struct containing the desired clock timings + * Output: + * curr_clk_limit - struct containing the timings that need to be overwritten + * Return: 0 upon success, non-zero for failure + */ +static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit, + struct clk_limit_table_entry *curr_clk_limit) +{ + if (NULL == max_clk_limit || NULL == curr_clk_limit) + return -1; //invalid parameters + + //only overwrite if desired max clock frequency is initialized + if (max_clk_limit->dcfclk_mhz != 0) + curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz; + + if (max_clk_limit->fclk_mhz != 0) + curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz; + + if (max_clk_limit->memclk_mhz != 0) + curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz; + + if (max_clk_limit->socclk_mhz != 0) + curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz; + + if (max_clk_limit->dtbclk_mhz != 0) + curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz; + + if (max_clk_limit->dispclk_mhz != 0) + curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz; + + return 0; +} + +static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params, struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) { int i, j; struct _vcs_dpi_voltage_scaling_st entry = {0}; - - unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, - max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; + struct clk_limit_table_entry max_clk_data = {0}; unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; @@ -2343,51 +2377,76 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, unsigned int num_fclk_dpms = 0; unsigned int num_dcfclk_dpms = 0; - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if 
(bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) - max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) - max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) - max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + unsigned int num_dc_uclk_dpms = 0; + unsigned int num_dc_fclk_dpms = 0; + unsigned int num_dc_dcfclk_dpms = 0; - if (bw_params->clk_table.entries[i].memclk_mhz > 0) + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz) + max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz) + max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz) + max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz) + max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz) + max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz) + max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz) + max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + + if (bw_params->clk_table.entries[i].memclk_mhz > 0) { num_uclk_dpms++; - if (bw_params->clk_table.entries[i].fclk_mhz > 0) + if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz) + num_dc_uclk_dpms++; + } + if (bw_params->clk_table.entries[i].fclk_mhz > 0) { num_fclk_dpms++; - if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) + if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz) + num_dc_fclk_dpms++; + } + if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) { num_dcfclk_dpms++; + if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz) + num_dc_dcfclk_dpms++; + } + } + + if (!disable_dc_mode_overwrite) { + //Overwrite max frequencies with max DC mode frequencies for DC mode systems + override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data); + num_uclk_dpms = num_dc_uclk_dpms; + num_fclk_dpms = num_dc_fclk_dpms; + num_dcfclk_dpms = num_dc_dcfclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms; } if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz) min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz; - if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz) + if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz) return -1; - if (max_dppclk_mhz == 0) - max_dppclk_mhz = max_dispclk_mhz; + if (max_clk_data.dppclk_mhz == 0) + 
max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz; - if (max_fclk_mhz == 0) - max_fclk_mhz = max_dcfclk_mhz * dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; + if (max_clk_data.fclk_mhz == 0) + max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz * + dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / + dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; - if (max_phyclk_mhz == 0) - max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; + if (max_clk_data.phyclk_mhz == 0) + max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; *num_entries = 0; - entry.dispclk_mhz = max_dispclk_mhz; - entry.dscclk_mhz = max_dispclk_mhz / 3; - entry.dppclk_mhz = max_dppclk_mhz; - entry.dtbclk_mhz = max_dtbclk_mhz; - entry.phyclk_mhz = max_phyclk_mhz; + entry.dispclk_mhz = max_clk_data.dispclk_mhz; + entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3; + entry.dppclk_mhz = max_clk_data.dppclk_mhz; + entry.dtbclk_mhz = max_clk_data.dtbclk_mhz; + entry.phyclk_mhz = max_clk_data.phyclk_mhz; entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; @@ -2401,7 +2460,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, } // Insert the max DCFCLK - entry.dcfclk_mhz = max_dcfclk_mhz; + entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; @@ -2429,7 +2488,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // If FCLK fine grained, only insert max else { entry.dcfclk_mhz = 0; - entry.fabricclk_mhz = max_fclk_mhz; + entry.fabricclk_mhz = max_clk_data.fclk_mhz; entry.dram_speed_mts = 0; insert_entry_into_table_sorted(table, num_entries, &entry); @@ -2441,9 +2500,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // Remove states that require higher clocks than are supported for (i = *num_entries - 1; i >= 0 ; i--) { - if (table[i].dcfclk_mhz > max_dcfclk_mhz || - table[i].fabricclk_mhz > max_fclk_mhz || - table[i].dram_speed_mts > max_uclk_mhz * 16) + if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz || + table[i].fabricclk_mhz > max_clk_data.fclk_mhz || + table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16) remove_entry_from_table_at_index(table, num_entries, i); } @@ -2756,7 +2815,8 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; } } else { - build_synthetic_soc_states(bw_params, dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states); + build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params, + dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states); } /* Re-init DML with updated bb */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 1aaff6f2d453..f0683fd9d3f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -252,14 +252,48 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st)); } -static int build_synthetic_soc_states(struct clk_bw_params *bw_params, +/* + * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings + * Input: + * max_clk_limit - struct containing the desired clock timings + * Output: + * curr_clk_limit - struct containing the timings that need to be 
overwritten + * Return: 0 upon success, non-zero for failure + */ +static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit, + struct clk_limit_table_entry *curr_clk_limit) +{ + if (NULL == max_clk_limit || NULL == curr_clk_limit) + return -1; //invalid parameters + + //only overwrite if desired max clock frequency is initialized + if (max_clk_limit->dcfclk_mhz != 0) + curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz; + + if (max_clk_limit->fclk_mhz != 0) + curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz; + + if (max_clk_limit->memclk_mhz != 0) + curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz; + + if (max_clk_limit->socclk_mhz != 0) + curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz; + + if (max_clk_limit->dtbclk_mhz != 0) + curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz; + + if (max_clk_limit->dispclk_mhz != 0) + curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz; + + return 0; +} + +static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params, struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) { int i, j; struct _vcs_dpi_voltage_scaling_st entry = {0}; - - unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, - max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; + struct clk_limit_table_entry max_clk_data = {0}; unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; @@ -270,53 +304,78 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, unsigned int num_fclk_dpms = 0; unsigned int num_dcfclk_dpms = 0; - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) - max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) - max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) - max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + unsigned int num_dc_uclk_dpms = 0; + unsigned int num_dc_fclk_dpms = 0; + unsigned int num_dc_dcfclk_dpms = 0; - if (bw_params->clk_table.entries[i].memclk_mhz > 0) + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz) + max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz) + max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz) + max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz) + max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz) + max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > 
max_clk_data.phyclk_mhz) + max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz) + max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + + if (bw_params->clk_table.entries[i].memclk_mhz > 0) { num_uclk_dpms++; - if (bw_params->clk_table.entries[i].fclk_mhz > 0) + if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz) + num_dc_uclk_dpms++; + } + if (bw_params->clk_table.entries[i].fclk_mhz > 0) { num_fclk_dpms++; - if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) + if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz) + num_dc_fclk_dpms++; + } + if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) { num_dcfclk_dpms++; + if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz) + num_dc_dcfclk_dpms++; + } + } + + if (!disable_dc_mode_overwrite) { + //Overwrite max frequencies with max DC mode frequencies for DC mode systems + override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data); + num_uclk_dpms = num_dc_uclk_dpms; + num_fclk_dpms = num_dc_fclk_dpms; + num_dcfclk_dpms = num_dc_dcfclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms; + bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms; } if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz) min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz; - if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz) + if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz) return -1; - if (max_dppclk_mhz == 0) - max_dppclk_mhz = max_dispclk_mhz; + if (max_clk_data.dppclk_mhz == 0) + max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz; - if (max_fclk_mhz == 0) - max_fclk_mhz = max_dcfclk_mhz * dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / dcn3_21_soc.pct_ideal_fabric_bw_after_urgent; + if (max_clk_data.fclk_mhz == 0) + max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz * + dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / + dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; - if (max_phyclk_mhz == 0) - max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz; + if (max_clk_data.phyclk_mhz == 0) + max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; *num_entries = 0; - entry.dispclk_mhz = max_dispclk_mhz; - entry.dscclk_mhz = max_dispclk_mhz / 3; - entry.dppclk_mhz = max_dppclk_mhz; - entry.dtbclk_mhz = max_dtbclk_mhz; - entry.phyclk_mhz = max_phyclk_mhz; - entry.phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz; - entry.phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; + entry.dispclk_mhz = max_clk_data.dispclk_mhz; + entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3; + entry.dppclk_mhz = max_clk_data.dppclk_mhz; + entry.dtbclk_mhz = max_clk_data.dtbclk_mhz; + entry.phyclk_mhz = max_clk_data.phyclk_mhz; + entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; + entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; // Insert all the DCFCLK STAs for (i = 0; i < num_dcfclk_stas; i++) { @@ -328,7 +387,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, } // Insert the max DCFCLK - entry.dcfclk_mhz = max_dcfclk_mhz; + entry.dcfclk_mhz = max_clk_data.dcfclk_mhz; entry.fabricclk_mhz = 0; entry.dram_speed_mts = 0; @@ -356,7 +415,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // If FCLK fine grained, only insert max else { entry.dcfclk_mhz = 0; - 
entry.fabricclk_mhz = max_fclk_mhz; + entry.fabricclk_mhz = max_clk_data.fclk_mhz; entry.dram_speed_mts = 0; dcn321_insert_entry_into_table_sorted(table, num_entries, &entry); @@ -368,9 +427,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params, // Remove states that require higher clocks than are supported for (i = *num_entries - 1; i >= 0 ; i--) { - if (table[i].dcfclk_mhz > max_dcfclk_mhz || - table[i].fabricclk_mhz > max_fclk_mhz || - table[i].dram_speed_mts > max_uclk_mhz * 16) + if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz || + table[i].fabricclk_mhz > max_clk_data.fclk_mhz || + table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16) remove_entry_from_table_at_index(table, num_entries, i); } @@ -689,7 +748,8 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz; } } else { - build_synthetic_soc_states(bw_params, dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states); + build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params, + dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states); } /* Re-init DML with updated bb */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index bef843cc32a1..6faf40fa5c69 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -233,6 +233,7 @@ struct clk_bw_params { struct clk_limit_table clk_table; struct wm_table wm_table; struct dummy_pstate_entry dummy_pstate_table[4]; + struct clk_limit_table_entry dc_mode_limit; }; /* Public interfaces */
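Taken together, the mechanism is: each clock's DC mode ceiling is read from the SMU via dcn30_smu_get_dc_mode_max_dpm_freq() at init, stored in bw_params->dc_mode_limit, and then used to cap the synthetic state table unless debug.disable_dc_mode_overwrite is set. A condensed sketch of the capping step (the helper below is a simplified illustration in the spirit of override_max_clk_values(), not the exact driver code):

/* Sketch: clamp one entry's clocks to the stored DC mode limits.
 * A limit of 0 means "not initialized" and is ignored, mirroring
 * the checks in override_max_clk_values().
 */
static void cap_entry_to_dc_mode(const struct clk_limit_table_entry *limit,
				 struct clk_limit_table_entry *entry)
{
	if (limit->dcfclk_mhz && entry->dcfclk_mhz > limit->dcfclk_mhz)
		entry->dcfclk_mhz = limit->dcfclk_mhz;
	if (limit->fclk_mhz && entry->fclk_mhz > limit->fclk_mhz)
		entry->fclk_mhz = limit->fclk_mhz;
	if (limit->memclk_mhz && entry->memclk_mhz > limit->memclk_mhz)
		entry->memclk_mhz = limit->memclk_mhz;
	if (limit->socclk_mhz && entry->socclk_mhz > limit->socclk_mhz)
		entry->socclk_mhz = limit->socclk_mhz;
	if (limit->dtbclk_mhz && entry->dtbclk_mhz > limit->dtbclk_mhz)
		entry->dtbclk_mhz = limit->dtbclk_mhz;
	if (limit->dispclk_mhz && entry->dispclk_mhz > limit->dispclk_mhz)
		entry->dispclk_mhz = limit->dispclk_mhz;
}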