Merge tag 'amd-drm-next-5.7-2020-03-26' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.7-2020-03-26:

amdgpu:
- Remove a dpm quirk that is not necessary
- Fix handling of AC/DC mode in newer SMU firmwares on navi
- SR-IOV fixes
- RAS fixes

scheduler:
- Fix a race condition

radeon:
- Remove a dpm quirk that is not necessary

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200326155310.5486-1-alexander.deucher@amd.com
commit c0ca5437c5
@@ -2742,6 +2742,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
        if (adev->asic_reset_res)
            goto fail;

        if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
            adev->mmhub.funcs->reset_ras_error_count(adev);
    } else {

        task_barrier_full(&hive->tb);

@@ -3910,8 +3913,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
            }
        }

        if (!r && amdgpu_ras_intr_triggered())
        if (!r && amdgpu_ras_intr_triggered()) {
            list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
                if (tmp_adev->mmhub.funcs &&
                    tmp_adev->mmhub.funcs->reset_ras_error_count)
                    tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
            }

            amdgpu_ras_intr_cleared();
        }

    list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
        if (need_full_reset) {

@@ -92,6 +92,9 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
        if (adev->powerplay.pp_funcs->enable_bapm)
            amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
        mutex_unlock(&adev->pm.mutex);

        if (is_support_sw_smu(adev))
            smu_set_ac_dc(&adev->smu);
    }
}

@@ -159,10 +159,6 @@ static int psp_sw_fini(void *handle)
    adev->psp.sos_fw = NULL;
    release_firmware(adev->psp.asd_fw);
    adev->psp.asd_fw = NULL;
    if (adev->psp.cap_fw) {
        release_firmware(adev->psp.cap_fw);
        adev->psp.cap_fw = NULL;
    }
    if (adev->psp.ta_fw) {
        release_firmware(adev->psp.ta_fw);
        adev->psp.ta_fw = NULL;

@@ -250,7 +246,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
        DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
                 psp->cmd_buf_mem->cmd_id,
                 psp->cmd_buf_mem->resp.status);
        if ((ucode->ucode_id == AMDGPU_UCODE_ID_CAP) || !timeout) {
        if (!timeout) {
            mutex_unlock(&psp->mutex);
            return -EINVAL;
        }

@@ -1192,9 +1188,6 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
                           enum psp_gfx_fw_type *type)
{
    switch (ucode->ucode_id) {
    case AMDGPU_UCODE_ID_CAP:
        *type = GFX_FW_TYPE_CAP;
        break;
    case AMDGPU_UCODE_ID_SDMA0:
        *type = GFX_FW_TYPE_SDMA0;
        break;

@@ -252,9 +252,6 @@ struct psp_context
    uint32_t                asd_ucode_size;
    uint8_t                 *asd_start_addr;

    /* cap firmware */
    const struct firmware   *cap_fw;

    /* fence buffer */
    struct amdgpu_bo        *fence_buf_bo;
    uint64_t                fence_buf_mc_addr;

@@ -281,6 +281,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
    struct ras_debug_if data;
    int ret = 0;

    if (amdgpu_ras_intr_triggered()) {
        DRM_WARN("RAS WARN: error injection currently inaccessible\n");
        return size;
    }

    ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
    if (ret)
        return -EINVAL;

@@ -394,6 +399,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
        .head = obj->head,
    };

    if (amdgpu_ras_intr_triggered())
        return snprintf(buf, PAGE_SIZE,
                "Query currently inaccessible\n");

    if (amdgpu_ras_error_query(obj->adev, &info))
        return -EINVAL;

@@ -1840,9 +1840,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
     *The reserved vram for memory training must be pinned to the specified
     *place on the VRAM, so reserve it early.
     */
    r = amdgpu_ttm_training_reserve_vram_init(adev);
    if (r)
        return r;
    if (!amdgpu_sriov_vf(adev)) {
        r = amdgpu_ttm_training_reserve_vram_init(adev);
        if (r)
            return r;
    }

    /* allocate memory as required for VGA
     * This is used for VGA emulation and pre-OS scanout buffers to

@@ -283,8 +283,7 @@ union amdgpu_firmware_header {
 * fw loading support
 */
enum AMDGPU_UCODE_ID {
    AMDGPU_UCODE_ID_CAP = 0,    /* CAP must be the 1st fw to be loaded */
    AMDGPU_UCODE_ID_SDMA0,
    AMDGPU_UCODE_ID_SDMA0 = 0,
    AMDGPU_UCODE_ID_SDMA1,
    AMDGPU_UCODE_ID_SDMA2,
    AMDGPU_UCODE_ID_SDMA3,

@@ -1940,6 +1940,11 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
        if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
            gfx_v10_0_rlc_enable_srm(adev);
    } else {
        if (amdgpu_sriov_vf(adev)) {
            gfx_v10_0_init_csb(adev);
            return 0;
        }

        adev->gfx.rlc.funcs->stop(adev);

        /* disable CG */

@@ -4306,7 +4306,7 @@ static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x81 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },

@@ -246,7 +246,6 @@ enum psp_gfx_fw_type {
    GFX_FW_TYPE_SDMA6 = 56,    /* SDMA6 MI */
    GFX_FW_TYPE_SDMA7 = 57,    /* SDMA7 MI */
    GFX_FW_TYPE_VCN1 = 58,     /* VCN1 MI */
    GFX_FW_TYPE_CAP = 62,      /* CAP_FW VG */
    GFX_FW_TYPE_MAX
};

@@ -44,7 +44,6 @@

MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
MODULE_FIRMWARE("amdgpu/vega10_cap.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
MODULE_FIRMWARE("amdgpu/vega12_asd.bin");

@@ -64,7 +63,6 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
    char fw_name[30];
    int err = 0;
    const struct psp_firmware_header_v1_0 *hdr;
    struct amdgpu_firmware_info *info = NULL;

    DRM_DEBUG("\n");

@@ -114,26 +112,6 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
    adev->psp.asd_start_addr = (uint8_t *)hdr +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes);

    if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_VEGA10) {
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin",
                 chip_name);
        err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
        if (err)
            goto out;

        err = amdgpu_ucode_validate(adev->psp.cap_fw);
        if (err)
            goto out;

        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
        info->ucode_id = AMDGPU_UCODE_ID_CAP;
        info->fw = adev->psp.cap_fw;
        hdr = (const struct psp_firmware_header_v1_0 *)
            adev->psp.cap_fw->data;
        adev->firmware.fw_size += ALIGN(
            le32_to_cpu(hdr->header.ucode_size_bytes), PAGE_SIZE);
    }

    return 0;
out:
    if (err) {

@@ -144,8 +122,6 @@ out:
        adev->psp.sos_fw = NULL;
        release_firmware(adev->psp.asd_fw);
        adev->psp.asd_fw = NULL;
        release_firmware(adev->psp.cap_fw);
        adev->psp.cap_fw = NULL;
    }

    return err;

@@ -3439,7 +3439,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,

    if (adev->asic_type == CHIP_HAINAN) {
        if ((adev->pdev->revision == 0x81) ||
            (adev->pdev->revision == 0x83) ||
            (adev->pdev->revision == 0xC3) ||
            (adev->pdev->device == 0x6664) ||
            (adev->pdev->device == 0x6665) ||

@@ -524,8 +524,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)

    acrtc_state = to_dm_crtc_state(acrtc->base.state);

    DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                  amdgpu_dm_vrr_active(acrtc_state));
    DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                  amdgpu_dm_vrr_active(acrtc_state),
                  acrtc_state->active_planes);

    amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
    drm_crtc_handle_vblank(&acrtc->base);

@@ -545,7 +546,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
                    &acrtc_state->vrr_params.adjust);
    }

    if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
    /*
     * If there aren't any active_planes then DCH HUBP may be clock-gated.
     * In that case, pageflip completion interrupts won't fire and pageflip
     * completion events won't get delivered. Prevent this by sending
     * pending pageflip events from here if a flip is still pending.
     *
     * If any planes are enabled, use dm_pflip_high_irq() instead, to
     * avoid race conditions between flip programming and completion,
     * which could cause too early flip completion events.
     */
    if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
        acrtc_state->active_planes == 0) {
        if (acrtc->event) {
            drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
            acrtc->event = NULL;

@@ -1154,6 +1154,21 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
            }
        }
    }

        if (smu->ppt_funcs->set_power_source) {
            /*
             * For Navi1X, manually switch it to AC mode as PMFW
             * may boot it with DC mode.
             */
            if (adev->pm.ac_power)
                ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
            else
                ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
            if (ret) {
                pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
                return ret;
            }
        }
    }
    if (adev->asic_type != CHIP_ARCTURUS) {
        ret = smu_notify_display_change(smu);

@@ -2072,6 +2087,29 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
    return 0;
}

int smu_set_ac_dc(struct smu_context *smu)
{
    int ret = 0;

    /* controlled by firmware */
    if (smu->dc_controlled_by_gpio)
        return 0;

    mutex_lock(&smu->mutex);
    if (smu->ppt_funcs->set_power_source) {
        if (smu->adev->pm.ac_power)
            ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
        else
            ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
        if (ret)
            pr_err("Failed to switch to %s mode!\n",
                   smu->adev->pm.ac_power ? "AC" : "DC");
    }
    mutex_unlock(&smu->mutex);

    return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
    .name = "smu",
    .early_init = smu_early_init,

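Note (not part of the diff): taken together, the AC/DC changes work as follows — navi10_store_powerplay_table() sets dc_controlled_by_gpio when the powerplay table advertises SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC, and smu_set_ac_dc() only forwards the current power source to the PMFW when that flag is clear. The stand-alone C sketch below models that decision flow with simplified, hypothetical types (toy_smu, toy_notify_power_source); it is an illustration, not the kernel code.

/* Stand-alone model of the AC/DC switch logic in this pull.
 * All types and helpers here are simplified stand-ins, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

enum power_src { POWER_SOURCE_AC, POWER_SOURCE_DC };

struct toy_smu {
    bool dc_controlled_by_gpio; /* set from the HARDWAREDC platform cap */
    bool ac_power;              /* mirrors adev->pm.ac_power */
};

/* Models smu_set_power_source(): tell the PMFW which source is active. */
static int toy_notify_power_source(struct toy_smu *smu, enum power_src src)
{
    (void)smu;
    printf("NotifyPowerSource(%s)\n", src == POWER_SOURCE_AC ? "AC" : "DC");
    return 0;
}

/* Models smu_set_ac_dc(): skip the message when a GPIO already tells
 * the firmware about AC/DC transitions. */
static int toy_set_ac_dc(struct toy_smu *smu)
{
    if (smu->dc_controlled_by_gpio)
        return 0; /* hardware/firmware handles it */

    return toy_notify_power_source(smu,
            smu->ac_power ? POWER_SOURCE_AC : POWER_SOURCE_DC);
}

int main(void)
{
    struct toy_smu smu = { .dc_controlled_by_gpio = false, .ac_power = true };

    toy_set_ac_dc(&smu);              /* prints NotifyPowerSource(AC) */
    smu.dc_controlled_by_gpio = true;
    toy_set_ac_dc(&smu);              /* no message: GPIO-controlled */
    return 0;
}
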
@@ -408,6 +408,7 @@ struct smu_context
    uint32_t smc_if_version;

    bool uploading_custom_pp_table;
    bool dc_controlled_by_gpio;
};

struct i2c_adapter;

@@ -570,6 +571,7 @@ struct pptable_funcs {
    int (*override_pcie_parameters)(struct smu_context *smu);
    uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
    int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
    int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
};

int smu_load_microcode(struct smu_context *smu);

@@ -718,6 +720,7 @@ int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
int smu_set_ac_dc(struct smu_context *smu);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);

@@ -267,4 +267,7 @@ uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
int smu_v11_0_set_performance_level(struct smu_context *smu,
                enum amd_dpm_forced_level level);

int smu_v11_0_set_power_source(struct smu_context *smu,
                enum smu_power_src_type power_src);

#endif

@@ -347,7 +347,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
                | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
                | FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
                | FEATURE_MASK(FEATURE_BACO_BIT)
                | FEATURE_MASK(FEATURE_ACDC_BIT)
                | FEATURE_MASK(FEATURE_GFX_SS_BIT)
                | FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
                | FEATURE_MASK(FEATURE_FW_CTF_BIT)

@@ -391,6 +390,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
    if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);

    if (smu->dc_controlled_by_gpio)
        *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);

    /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
    if (is_asic_secure(smu)) {
        /* only for navi10 A0 */

@@ -525,6 +527,9 @@ static int navi10_store_powerplay_table(struct smu_context *smu)

    table_context->thermal_controller_type = powerplay_table->thermal_controller_type;

    if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
        smu->dc_controlled_by_gpio = true;

    mutex_lock(&smu_baco->mutex);
    if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
        powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)

@@ -2369,6 +2374,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
    .get_pptable_power_limit = navi10_get_pptable_power_limit,
    .run_btc = navi10_run_btc,
    .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
    .set_power_source = smu_v11_0_set_power_source,
};

void navi10_set_ppt_funcs(struct smu_context *smu)

@@ -211,4 +211,7 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ
#define smu_disable_umc_cdr_12gbps_workaround(smu) \
    ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)

#define smu_set_power_source(smu, power_src) \
    ((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)

#endif

@@ -1525,6 +1525,13 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
    return ret;
}

static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
    return smu_send_smc_msg(smu,
                SMU_MSG_ReenableAcDcInterrupt,
                NULL);
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */

@@ -1558,6 +1565,9 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
            break;

        }
    } else if (client_id == SOC15_IH_CLIENTID_MP1) {
        if (src_id == 0xfe)
            smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
    }

    return 0;

@@ -1597,6 +1607,12 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
    if (ret)
        return ret;

    ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
                0xfe,
                irq_src);
    if (ret)
        return ret;

    return ret;
}

@@ -1939,3 +1955,18 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
    return ret;
}

int smu_v11_0_set_power_source(struct smu_context *smu,
                enum smu_power_src_type power_src)
{
    int pwr_source;

    pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
    if (pwr_source < 0)
        return -EINVAL;

    return smu_send_smc_msg_with_param(smu,
                SMU_MSG_NotifyPowerSource,
                pwr_source,
                NULL);
}

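Note (not part of the diff): the new MP1 interrupt source (src_id 0xfe) fires when the platform switches between AC and DC power, and the handler simply re-arms it by sending SMU_MSG_ReenableAcDcInterrupt back to the PMFW. The toy dispatcher below sketches that flow; the client IDs and types are illustrative stand-ins, not the SOC15 definitions.

/* Toy model of the AC/DC interrupt dispatch added to smu_v11_0_irq_process().
 * Constants and types are stand-ins, not the kernel definitions.
 */
#include <stdio.h>

#define TOY_CLIENTID_THM 0x10  /* thermal client (illustrative value) */
#define TOY_CLIENTID_MP1 0x11  /* PMFW/MP1 client (illustrative value) */
#define TOY_SRCID_AC_DC  0xfe  /* AC/DC change source id, as in the patch */

static void toy_ack_ac_dc(void)
{
    /* Models smu_v11_0_ack_ac_dc_interrupt(): tell the PMFW it may
     * raise the next AC/DC interrupt. */
    printf("send ReenableAcDcInterrupt\n");
}

static int toy_irq_process(unsigned int client_id, unsigned int src_id)
{
    if (client_id == TOY_CLIENTID_THM) {
        printf("thermal interrupt %u\n", src_id);
    } else if (client_id == TOY_CLIENTID_MP1) {
        if (src_id == TOY_SRCID_AC_DC)
            toy_ack_ac_dc();
    }
    return 0;
}

int main(void)
{
    toy_irq_process(TOY_CLIENTID_MP1, TOY_SRCID_AC_DC);
    return 0;
}
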
@@ -2979,7 +2979,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,

    if (rdev->family == CHIP_HAINAN) {
        if ((rdev->pdev->revision == 0x81) ||
            (rdev->pdev->revision == 0x83) ||
            (rdev->pdev->revision == 0xC3) ||
            (rdev->pdev->device == 0x6664) ||
            (rdev->pdev->device == 0x6665) ||

@@ -651,7 +651,9 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)

    trace_drm_sched_process_job(s_fence);

    dma_fence_get(&s_fence->finished);
    drm_sched_fence_finished(s_fence);
    dma_fence_put(&s_fence->finished);
    wake_up_interruptible(&sched->wake_up_worker);
}

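Note (not part of the diff): the scheduler fix is the classic "hold a reference across the signal" pattern — signaling the finished fence can let another thread drop the last reference to it, so drm_sched_process_job() now takes its own reference before drm_sched_fence_finished() and releases it afterwards. The stand-alone sketch below models the pattern with a toy refcounted fence; the names are hypothetical, not DRM API.

/* Stand-alone sketch of "take a reference before signaling" to avoid a
 * use-after-free race; toy types only, not the DRM scheduler API.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
    int refcount;
    int signaled;
};

static void toy_fence_get(struct toy_fence *f) { f->refcount++; }

static void toy_fence_put(struct toy_fence *f)
{
    if (--f->refcount == 0) {
        printf("fence freed\n");
        free(f);
    }
}

/* Signaling may indirectly drop the caller's other reference (e.g. a waiter
 * wakes up and releases the job that owned the fence). */
static void toy_fence_signal(struct toy_fence *f)
{
    f->signaled = 1;
    toy_fence_put(f); /* models the waiter dropping its reference */
}

static void toy_process_job(struct toy_fence *f)
{
    toy_fence_get(f);  /* keep the fence alive across the signal */
    toy_fence_signal(f);
    toy_fence_put(f);  /* safe: we still held our own reference */
}

int main(void)
{
    struct toy_fence *f = malloc(sizeof(*f));

    f->refcount = 1;   /* the waiter's reference */
    f->signaled = 0;
    toy_process_job(f);
    return 0;
}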