drm pull fixes for 5.3-rc3

amdgpu:
- navi10 temperature and pstate fixes
- vcn dynamic power management fix
- CS ioctl error handling fix
- debugfs info leak fix
- amdkfd VegaM fix

msm:
- dma sync call fix
- mdp5 dsi command mode fix
- fall-through fixes
- disabled GPU fix

nouveau:
- regression fix for displayport MST support
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJdQ5SQAAoJEAx081l5xIa+m4gP/jaCuo50oIAoTcKGWi6JM4FB
6dg3peqNSLx+pGnNHOXx3hYUyf2KfUZ2CtTYvab4xFjES//rJOy+HwrtbjVvW0AG
RfFDVHB2Vsz5e23TxVJBkodJwudsuVCUsINHGDg3o9GYOnZDe8bO7aiEiZ7xbFYA
29DOCXuLx8JvN3Gm/HKVrasIY7T3mm84/L1Yo72OhExSipyfGImBsxZ5gW1K7GzO
XL+I8W2h9ViBEsy7DfRGAqn5SwSl81JePf1X73Fl3ZEzMB9drtt/+BsJc2eAfacP
aG3vQL8i1EdzGGMqLZTy0csfr76Bp8hnZIhKdmyYVq2zTGd+J5SCQWlaJ/ov0vTj
dzPMfbLysVzQzxOdQNAkokyM1N+r2QicjR1W8jki/BCGoZhrhoMH3JA9qgxnLVw9
ggem6rMSb5yXnp4JxCOBWZzROON06hdaiGBHAeXqq+mU9DFj/xTqlloUE3Ln/ncs
2HVFxsN6+tNN5vEFzKPPJYk+OpW8+r5UfWJmcpaDfMiuzBWyuqOkw2+DFrfdrQDp
ubDC7O0ZBlhfEcUjHVYbFJc5lv7ip21DxHGEUnDdPdePi+/UKOq6ySDM2yE+rshL
Sg05Ifu17uUGwd2gtDKaIcX9RMxwc8TdSt3CRhAVl2JrCLFsbQwhntWthIHlndmn
OkzCKmsylr2lX19GEh5J
=jOZ/
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-08-02' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Thanks to Daniel for handling the email the last couple of weeks,
  flus and break-ins combined to derail me. Surprised nothing
  materialised today to take me out again.

  Just more amdgpu navi fixes, msm fixes and a single nouveau
  regression fix:

  amdgpu:
   - navi10 temperature and pstate fixes
   - vcn dynamic power management fix
   - CS ioctl error handling fix
   - debugfs info leak fix
   - amdkfd VegaM fix

  msm:
   - dma sync call fix
   - mdp5 dsi command mode fix
   - fall-through fixes
   - disabled GPU fix

  nouveau:
   - regression fix for displayport MST support"

* tag 'drm-fixes-2019-08-02' of git://anongit.freedesktop.org/drm/drm:
  drm/nouveau: Only release VCPI slots on mode changes
  drm: msm: Fix add_gpu_components
  drm/msm: Annotate intentional switch statement fall throughs
  drm/msm: add support for per-CRTC max_vblank_count on mdp5
  drm/msm: Use the correct dma_sync calls in msm_gem
  drm/amd/powerplay: correct UVD/VCE/VCN power status retrieval
  drm/amd/powerplay: correct Navi10 VCN powergate control (v2)
  drm/amd/powerplay: support VCN powergate status retrieval for SW SMU
  drm/amd/powerplay: support VCN powergate status retrieval on Raven
  drm/amd/powerplay: add new sensor type for VCN powergate status
  drm/amdgpu: fix a potential information leaking bug
  drm/amdgpu: fix error handling in amdgpu_cs_process_fence_dep
  drm/amd/powerplay: enable SW SMU reset functionality
  drm/amd/powerplay: fix null pointer dereference around dpm state relates
  drm/amdgpu/powerplay: use proper revision id for navi
  drm/amd/powerplay: fix temperature granularity error in smu11
  drm/amd/powerplay: add callback function of get_thermal_temperature_range
  drm/amdkfd: Fix byte align on VegaM
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 			adev->asic_type != CHIP_FIJI &&
 			adev->asic_type != CHIP_POLARIS10 &&
 			adev->asic_type != CHIP_POLARIS11 &&
-			adev->asic_type != CHIP_POLARIS12) ?
+			adev->asic_type != CHIP_POLARIS12 &&
+			adev->asic_type != CHIP_VEGAM) ?
 			VI_BO_SIZE_ALIGN : 1;
 
 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
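
For context on this hunk, a reduced userspace sketch of the alignment selection it changes. The enum and the 0x8000 value are illustrative stand-ins rather than the driver's definitions; the point is that within the VI family the newer parts are excluded from the large alignment, and VegaM now joins Fiji/Polaris in using 1.

#include <stdio.h>

enum chip { CHIP_TONGA, CHIP_FIJI, CHIP_POLARIS10, CHIP_POLARIS11,
	    CHIP_POLARIS12, CHIP_VEGAM };

#define VI_BO_SIZE_ALIGN 0x8000	/* illustrative value */

static unsigned int byte_align(enum chip asic_type)
{
	/* VegaM is excluded from the big alignment, like Fiji/Polaris */
	return (asic_type != CHIP_FIJI &&
		asic_type != CHIP_POLARIS10 &&
		asic_type != CHIP_POLARIS11 &&
		asic_type != CHIP_POLARIS12 &&
		asic_type != CHIP_VEGAM) ? VI_BO_SIZE_ALIGN : 1;
}

int main(void)
{
	printf("tonga: 0x%x\n", byte_align(CHIP_TONGA));	/* 0x8000 */
	printf("vegam: 0x%x\n", byte_align(CHIP_VEGAM));	/* 1 */
	return 0;
}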
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 			return r;
 		}
 
-		fence = amdgpu_ctx_get_fence(ctx, entity,
-					     deps[i].handle);
+		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+		amdgpu_ctx_put(ctx);
+
+		if (IS_ERR(fence))
+			return PTR_ERR(fence);
+		else if (!fence)
+			continue;
 
 		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
-			struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+			struct drm_sched_fence *s_fence;
 			struct dma_fence *old = fence;
 
+			s_fence = to_drm_sched_fence(fence);
 			fence = dma_fence_get(&s_fence->scheduled);
 			dma_fence_put(old);
 		}
 
-		if (IS_ERR(fence)) {
-			r = PTR_ERR(fence);
-			amdgpu_ctx_put(ctx);
-			return r;
-		} else if (fence) {
-			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
-					      true);
-			dma_fence_put(fence);
-			amdgpu_ctx_put(ctx);
-			if (r)
-				return r;
-		}
+		r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+		dma_fence_put(fence);
+		if (r)
+			return r;
 	}
 
 	return 0;
 }
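
The fix works because amdgpu_ctx_get_fence() can return an ERR_PTR-encoded error, NULL (nothing to wait on), or a real fence, and the old code classified the pointer only after to_drm_sched_fence() had already dereferenced it. A minimal userspace sketch of that three-way convention (a simplified re-implementation for illustration, not the kernel's headers):

#include <stdio.h>

#define MAX_ERRNO	4095
#define EINVAL		22

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *lookup_fence(int handle)
{
	static int dummy;

	if (handle < 0)
		return ERR_PTR(-EINVAL);	/* an error, not a pointer */
	if (handle == 0)
		return NULL;			/* no fence, not an error */
	return &dummy;				/* a real object */
}

int main(void)
{
	void *fence = lookup_fence(-1);

	/* classify before dereferencing, as the fixed code does */
	if (IS_ERR(fence))
		printf("error: %ld\n", PTR_ERR(fence));
	else if (!fence)
		printf("no fence, skip\n");
	else
		printf("got fence %p\n", fence);
	return 0;
}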
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
 	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
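
The one-line change matters because kmalloc_array() leaves the buffer uninitialized: if the read path fills fewer than 1024 entries before the buffer is copied to userspace, the remaining bytes leak stale kernel heap contents. kcalloc() zeroes the allocation up front. A userspace analogy (calloc vs. malloc, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t n = 1024;
	unsigned int *a = malloc(n * sizeof(*a));	/* indeterminate, like kmalloc_array() */
	unsigned int *b = calloc(n, sizeof(*b));	/* zero-filled, like kcalloc() */

	if (!a || !b)
		return 1;

	/* fill only the first half, as a partial read might */
	memset(a, 0xab, (n / 2) * sizeof(*a));
	memset(b, 0xab, (n / 2) * sizeof(*b));

	/* a[n - 1] is whatever was on the heap; b[n - 1] is provably 0 */
	printf("calloc tail: %u\n", b[n - 1]);

	free(a);
	free(b);
	return 0;
}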
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 
-	if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
-		pm = amdgpu_smu_get_current_power_state(adev);
-	else if (adev->powerplay.pp_funcs->get_current_power_state)
+	if (is_support_sw_smu(adev)) {
+		if (adev->smu.ppt_funcs->get_current_power_state)
+			pm = amdgpu_smu_get_current_power_state(adev);
+		else
+			pm = adev->pm.dpm.user_state;
+	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
 		pm = amdgpu_dpm_get_current_power_state(adev);
-	else
+	} else {
 		pm = adev->pm.dpm.user_state;
+	}
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 		goto fail;
 	}
 
-	if (adev->powerplay.pp_funcs->dispatch_tasks) {
+	if (is_support_sw_smu(adev)) {
+		mutex_lock(&adev->pm.mutex);
+		adev->pm.dpm.user_state = state;
+		mutex_unlock(&adev->pm.mutex);
+	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
 	} else {
 		mutex_lock(&adev->pm.mutex);
@@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
 		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
 
-	/* UVD clocks */
-	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
-		if (!value) {
-			seq_printf(m, "UVD: Disabled\n");
-		} else {
-			seq_printf(m, "UVD: Enabled\n");
-			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
-				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
-			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
-				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+	if (adev->asic_type > CHIP_VEGA20) {
+		/* VCN clocks */
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
+			if (!value) {
+				seq_printf(m, "VCN: Disabled\n");
+			} else {
+				seq_printf(m, "VCN: Enabled\n");
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+			}
 		}
-	}
-	seq_printf(m, "\n");
+		seq_printf(m, "\n");
+	} else {
+		/* UVD clocks */
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
+			if (!value) {
+				seq_printf(m, "UVD: Disabled\n");
+			} else {
+				seq_printf(m, "UVD: Enabled\n");
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+			}
+		}
+		seq_printf(m, "\n");
 
-	/* VCE clocks */
-	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
-		if (!value) {
-			seq_printf(m, "VCE: Disabled\n");
-		} else {
-			seq_printf(m, "VCE: Enabled\n");
-			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
-				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+		/* VCE clocks */
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
+			if (!value) {
+				seq_printf(m, "VCE: Disabled\n");
+			} else {
+				seq_printf(m, "VCE: Enabled\n");
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+			}
 		}
 	}
 
@@ -123,6 +123,7 @@ enum amd_pp_sensors {
 	AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
 	AMDGPU_PP_SENSOR_MIN_FAN_RPM,
 	AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+	AMDGPU_PP_SENSOR_VCN_POWER_STATE,
 };
 
 enum amd_pp_task {
@@ -306,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
 
 	/* not support power state */
 	memset(state_info, 0, sizeof(struct pp_states_info));
-	state_info->nums = 0;
+	state_info->nums = 1;
+	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
 
 	return 0;
 }
@@ -337,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
 		*size = 4;
 		break;
+	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+		*size = 4;
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -723,6 +728,12 @@ static int smu_sw_init(void *handle)
 		return ret;
 	}
 
+	ret = smu_register_irq_handler(smu);
+	if (ret) {
+		pr_err("Failed to register smc irq handler!\n");
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -732,6 +743,9 @@ static int smu_sw_fini(void *handle)
 	struct smu_context *smu = &adev->smu;
 	int ret;
 
+	kfree(smu->irq_source);
+	smu->irq_source = NULL;
+
 	ret = smu_smc_table_sw_fini(smu);
 	if (ret) {
 		pr_err("Failed to sw fini smc table!\n");
@@ -1088,10 +1102,6 @@ static int smu_hw_init(void *handle)
 	if (ret)
 		goto failed;
 
-	ret = smu_register_irq_handler(smu);
-	if (ret)
-		goto failed;
-
 	if (!smu->pm_enabled)
 		adev->pm.dpm_enabled = false;
 	else
@@ -1121,9 +1131,6 @@ static int smu_hw_fini(void *handle)
 	kfree(table_context->overdrive_table);
 	table_context->overdrive_table = NULL;
 
-	kfree(smu->irq_source);
-	smu->irq_source = NULL;
-
 	ret = smu_fini_fb_allocations(smu);
 	if (ret)
 		return ret;
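
Taken together, these four hunks move the IRQ source from the hw phase to the sw phase: registration happens in smu_sw_init() instead of smu_hw_init(), and the kfree moves from smu_hw_fini() to smu_sw_fini(). Since a GPU reset re-runs only the hw phases, the irq source is no longer re-registered or freed across a reset. A toy sketch of that lifetime split (hypothetical structure, not the driver's code):

#include <stdio.h>
#include <stdlib.h>

struct smu { int *irq_source; };

static int sw_init(struct smu *smu)
{
	smu->irq_source = malloc(sizeof(*smu->irq_source));	/* acquired once */
	return smu->irq_source ? 0 : -1;
}

static void sw_fini(struct smu *smu)
{
	free(smu->irq_source);
	smu->irq_source = NULL;
}

static void hw_init(struct smu *smu)
{
	printf("program hw, irq_source=%p stays valid\n", (void *)smu->irq_source);
}

static void hw_fini(struct smu *smu)
{
	printf("stop hw, irq_source=%p untouched\n", (void *)smu->irq_source);
}

int main(void)
{
	struct smu smu;

	if (sw_init(&smu))
		return 1;
	hw_init(&smu);
	hw_fini(&smu);	/* a reset re-runs only the hw pair */
	hw_init(&smu);
	hw_fini(&smu);
	sw_fini(&smu);
	return 0;
}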
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 			  void *value, int *size)
 {
+	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
 	uint32_t sclk, mclk;
 	int ret = 0;
 
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 	case AMDGPU_PP_SENSOR_GPU_TEMP:
 		*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
 		break;
+	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+		*(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
+		*size = 4;
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
 
 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
 {
+	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
 	if (bgate) {
 		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						       AMD_IP_BLOCK_TYPE_VCN,
 						       AMD_PG_STATE_GATE);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_PowerDownVcn, 0);
+		smu10_data->vcn_power_gated = true;
 	} else {
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_PowerUpVcn, 0);
 		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						       AMD_IP_BLOCK_TYPE_VCN,
 						       AMD_PG_STATE_UNGATE);
+		smu10_data->vcn_power_gated = false;
 	}
 }
 
@@ -429,7 +429,6 @@ struct smu_table_context
 	struct smu_table		*tables;
 	uint32_t			table_count;
 	struct smu_table		memory_pool;
-	uint16_t			software_shutdown_temp;
 	uint8_t				thermal_controller_type;
 	uint16_t			TDPODLimit;
 
@@ -23,6 +23,7 @@
 
 #include "pp_debug.h"
 #include <linux/firmware.h>
+#include <linux/pci.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "atomfirmware.h"
@@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 {
 	int ret = 0;
-	struct smu_power_context *smu_power = &smu->smu_power;
-	struct smu_power_gate *power_gate = &smu_power->power_gate;
 
-	if (enable && power_gate->uvd_gated) {
-		if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
-			if (ret)
-				return ret;
-		}
-		power_gate->uvd_gated = false;
+	if (enable) {
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+		if (ret)
+			return ret;
 	} else {
-		if (!enable && !power_gate->uvd_gated) {
-			if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
-				ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
-				if (ret)
-					return ret;
-			}
-			power_gate->uvd_gated = true;
-		}
+		ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+		if (ret)
+			return ret;
 	}
 
-	return 0;
+	ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable);
+
+	return ret;
 }
 
 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
@@ -1573,7 +1566,7 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
 	uint32_t sclk_freq = 0, uclk_freq = 0;
 	uint32_t uclk_level = 0;
 
-	switch (adev->rev_id) {
+	switch (adev->pdev->revision) {
 	case 0xf0: /* XTX */
 	case 0xc0:
 		sclk_freq = NAVI10_PEAK_SCLK_XTX;
@@ -1620,6 +1613,22 @@ static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_fo
 	return ret;
 }
 
+static int navi10_get_thermal_temperature_range(struct smu_context *smu,
+						struct smu_temperature_range *range)
+{
+	struct smu_table_context *table_context = &smu->smu_table;
+	struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
+
+	if (!range || !powerplay_table)
+		return -EINVAL;
+
+	/* The unit is temperature */
+	range->min = 0;
+	range->max = powerplay_table->software_shutdown_temp;
+
+	return 0;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
 	.tables_init = navi10_tables_init,
 	.alloc_dpm_context = navi10_allocate_dpm_context,
|
|||||||
.get_ppfeature_status = navi10_get_ppfeature_status,
|
.get_ppfeature_status = navi10_get_ppfeature_status,
|
||||||
.set_ppfeature_status = navi10_set_ppfeature_status,
|
.set_ppfeature_status = navi10_set_ppfeature_status,
|
||||||
.set_performance_level = navi10_set_performance_level,
|
.set_performance_level = navi10_set_performance_level,
|
||||||
|
.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
|
||||||
};
|
};
|
||||||
|
|
||||||
void navi10_set_ppt_funcs(struct smu_context *smu)
|
void navi10_set_ppt_funcs(struct smu_context *smu)
|
||||||
|
@@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
 						       struct smu_temperature_range *range)
 {
 	struct amdgpu_device *adev = smu->adev;
-	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP *
-		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
-		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
 	uint32_t val;
 
 	if (!range)
@@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
 	if (high > range->max)
 		high = range->max;
 
+	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
+	high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+
 	if (low > high)
 		return -EINVAL;
 
@@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
 	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
 	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
 	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
-	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
-	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
 	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
 
 	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
 
 	if (!smu->pm_enabled)
 		return ret;
 
 	ret = smu_get_thermal_temperature_range(smu, &range);
+	if (ret)
+		return ret;
+
 	if (smu->smu_table.thermal_controller_type) {
 		ret = smu_v11_0_set_thermal_range(smu, &range);
@@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
 		return ret;
 	}
 
-	adev->pm.dpm.thermal.min_temp = range.min;
-	adev->pm.dpm.thermal.max_temp = range.max;
-	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
-	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
-	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
-	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
-	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
-	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
-	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+	adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 
 	return ret;
 }
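
The unit convention behind the smu11 temperature hunks: in this tree SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000, the firmware/pptable values are whole degrees Celsius, and the dpm thermal fields are consumed in millidegrees. The fix keeps the range in degrees inside the SMU code (including the 8-bit DIG_THERM_INTH/INTL register fields) and scales exactly once at the export boundary. A small worked example with an illustrative shutdown limit:

#include <stdio.h>

#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000

int main(void)
{
	int shutdown_temp = 100;	/* degrees C, e.g. from the pptable */

	/* register fields stay in plain degrees: fits the 8-bit mask */
	int reg_field = shutdown_temp & 0xff;

	/* exported dpm values are scaled once, to millidegrees */
	int exported = shutdown_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

	printf("register: %d, exported: %d millidegrees (%d.%03d C)\n",
	       reg_field, exported, exported / 1000, exported % 1000);
	return 0;
}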
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
 	memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
 	       sizeof(PPTable_t));
 
-	table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
 	table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
 	table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
 
@@ -3234,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu,
 	return 0;
 }
 
-static const struct smu_temperature_range vega20_thermal_policy[] =
-{
-	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
-	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
 static int vega20_get_thermal_temperature_range(struct smu_context *smu,
 					     struct smu_temperature_range *range)
 {
+	struct smu_table_context *table_context = &smu->smu_table;
+	ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 
-	if (!range)
+	if (!range || !powerplay_table)
 		return -EINVAL;
 
-	memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range));
-
-	range->max = pptable->TedgeLimit *
-		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
-		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	range->hotspot_crit_max = pptable->ThotspotLimit *
-		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
-		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	range->mem_crit_max = pptable->ThbmLimit *
-		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
-		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
+	/* The unit is temperature */
+	range->min = 0;
+	range->max = powerplay_table->usSoftwareShutdownTemp;
+	range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
+	range->hotspot_crit_max = pptable->ThotspotLimit;
+	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
+	range->mem_crit_max = pptable->ThbmLimit;
+	range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
 
 	return 0;
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			if (priv->lastctx == ctx)
 				break;
+			/* fall-thru */
 		case MSM_SUBMIT_CMD_BUF:
 			/* copy commands into RB: */
 			obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			if (priv->lastctx == ctx)
 				break;
+			/* fall-thru */
 		case MSM_SUBMIT_CMD_BUF:
 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			if (priv->lastctx == ctx)
 				break;
+			/* fall-thru */
 		case MSM_SUBMIT_CMD_BUF:
 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 			/* ignore if there has not been a ctx switch: */
 			if (priv->lastctx == ctx)
 				break;
+			/* fall-thru */
 		case MSM_SUBMIT_CMD_BUF:
 			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
 				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
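
The four msm hunks above add the same annotation. A standalone illustration of why: with -Wimplicit-fallthrough, GCC warns whenever a case can drop into the next one unless a fall-through comment (or the fallthrough attribute) marks the drop as intentional. The constants here are made up for the demo:

#include <stdio.h>

static void submit(int cmd, int same_ctx)
{
	switch (cmd) {
	case 1:				/* CTX_RESTORE_BUF */
		if (same_ctx)
			break;		/* nothing to restore */
		/* fall-thru */
	case 2:				/* CMD_BUF */
		printf("emit commands\n");
		break;
	default:
		printf("ignored\n");
	}
}

int main(void)
{
	submit(1, 0);	/* restore needed: falls through and emits */
	submit(1, 1);	/* same context: break, nothing emitted */
	submit(2, 0);	/* plain command buffer: emits */
	return 0;
}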
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
 	mdp5_crtc->enabled = false;
 }
 
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+	u32 count;
+
+	count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+	drm_crtc_set_max_vblank_count(crtc, count);
+
+	drm_crtc_vblank_on(crtc);
+}
+
 static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
 				    struct drm_crtc_state *old_state)
 {
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
 	}
 
 	/* Restore vblank irq handling after power is enabled */
-	drm_crtc_vblank_on(crtc);
+	mdp5_crtc_vblank_on(crtc);
 
 	mdp5_crtc_mode_set_nofb(crtc);
 
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
 		mdp5_crtc_destroy_state(crtc, crtc->state);
 
 	__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+	drm_crtc_vblank_reset(crtc);
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
 	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
 	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
-	dev->max_vblank_count = 0xffffffff;
+	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
 	dev->vblank_disable_immediate = true;
 
 	return kms;
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	drm_of_component_match_add(dev, matchptr, compare_of, np);
+	if (of_device_is_available(np))
+		drm_of_component_match_add(dev, matchptr, compare_of, np);
 
 	of_node_put(np);
 
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
 	return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API. Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tail of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_map_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
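
The dispatch in sync_for_device()/sync_for_cpu() keys off get_dma_ops(): when an IOMMU ops table is installed, the dma_sync_sg_* calls perform the cache maintenance, while on dma-direct the driver falls back to dma_map_sg()/dma_unmap_sg() for it. A toy userspace sketch of that two-path shape (fake ops table, none of the kernel APIs):

#include <stdio.h>

struct dma_ops { void (*sync)(const char *what); };

static void iommu_sync(const char *what) { printf("iommu path: %s\n", what); }

static const struct dma_ops *get_ops(int has_iommu)
{
	static const struct dma_ops iommu = { .sync = iommu_sync };

	return has_iommu ? &iommu : NULL;	/* NULL means dma-direct */
}

static void sync_for_device(int has_iommu)
{
	const struct dma_ops *ops = get_ops(has_iommu);

	if (ops)
		ops->sync("dma_sync_sg_for_device");
	else
		printf("direct path: dma_map_sg\n");
}

int main(void)
{
	sync_for_device(1);
	sync_for_device(0);
	return 0;
}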
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			sync_for_device(msm_obj);
 	}
 
 	return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
 			 * GPU, etc. are not coherent:
 			 */
 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
-					     msm_obj->sgt->nents,
-					     DMA_BIDIRECTIONAL);
+				sync_for_cpu(msm_obj);
 
 			sg_free_table(msm_obj->sgt);
 			kfree(msm_obj->sgt);
@@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
 		drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
 				     connector->display_info.bpc * 3);
 
-	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+	if (crtc_state->mode_changed) {
 		slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
 						      mstc->port,
 						      asyh->dp.pbn);
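
Why the predicate change is the fix: drm_atomic_crtc_needs_modeset() is true whenever mode_changed, active_changed or connectors_changed is set, so a plain DPMS on/off (active_changed only) also re-entered the VCPI allocation path. Keying on crtc_state->mode_changed restricts it to real mode changes. A sketch of the difference:

#include <stdbool.h>
#include <stdio.h>

struct crtc_state { bool mode_changed, active_changed, connectors_changed; };

/* mirrors the drm_atomic_crtc_needs_modeset() definition */
static bool needs_modeset(const struct crtc_state *s)
{
	return s->mode_changed || s->active_changed || s->connectors_changed;
}

int main(void)
{
	/* DPMS-style enable: no new mode, but needs_modeset() still fires */
	struct crtc_state s = { .active_changed = true };

	printf("needs_modeset: %d, mode_changed: %d\n",
	       needs_modeset(&s), s.mode_changed);
	return 0;
}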